diff --git a/Makefile.in b/Makefile.in index 5f0ad31cb..ead92c9a4 100644 --- a/Makefile.in +++ b/Makefile.in @@ -55,7 +55,7 @@ endif system_unit_files = contrib/systemd/onedrive@.service user_unit_files = contrib/systemd/onedrive.service -DOCFILES = README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md docs/application-security.md +DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-folders.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md ifneq ("$(wildcard /etc/redhat-release)","") RHEL = $(shell cat /etc/redhat-release | grep -E "(Red Hat Enterprise Linux|CentOS)" | wc -l) @@ -66,20 +66,19 @@ RHEL_VERSION = 0 endif SOURCES = \ - src/config.d \ - src/itemdb.d \ - src/log.d \ src/main.d \ - src/monitor.d \ - src/onedrive.d \ + src/config.d \ + src/log.d \ + src/util.d \ src/qxor.d \ - src/selective.d \ - src/sqlite.d \ + src/curlEngine.d \ + src/onedrive.d \ src/sync.d \ - src/upload.d \ - src/util.d \ + src/itemdb.d \ + src/sqlite.d \ + src/clientSideFiltering.d \ src/progress.d \ - src/arsd/cgi.d + src/monitor.d ifeq ($(NOTIFICATIONS),yes) SOURCES += src/notifications/notify.d src/notifications/dnotify.d diff --git a/CHANGELOG.md b/changelog.md similarity index 99% rename from CHANGELOG.md rename to changelog.md index a6d2d3f1b..8f7f357ad 100644 --- a/CHANGELOG.md +++ b/changelog.md @@ -2,6 +2,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+## 2.5.0 - TBA + + +### Changed +* Renamed various documentation files to align with document content + + ## 2.4.25 - 2023-06-21 ### Fixed * Fixed that the application was reporting as v2.2.24 when in fact it was v2.4.24 (release tagging issue) diff --git a/configure b/configure index f68a775cc..ca4f2f21c 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for onedrive v2.4.25. +# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-alpha-2. # # Report bugs to . # @@ -579,8 +579,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='onedrive' PACKAGE_TARNAME='onedrive' -PACKAGE_VERSION='v2.4.25' -PACKAGE_STRING='onedrive v2.4.25' +PACKAGE_VERSION='v2.5.0-alpha-2' +PACKAGE_STRING='onedrive v2.5.0-alpha-2' PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive' PACKAGE_URL='' @@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures onedrive v2.4.25 to adapt to many kinds of systems. +\`configure' configures onedrive v2.5.0-alpha-2 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1280,7 +1280,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of onedrive v2.4.25:";; + short | recursive ) echo "Configuration of onedrive v2.5.0-alpha-2:";; esac cat <<\_ACEOF @@ -1393,7 +1393,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -onedrive configure v2.4.25 +onedrive configure v2.5.0-alpha-2 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. 
-It was created by onedrive $as_me v2.4.25, which was +It was created by onedrive $as_me v2.5.0-alpha-2, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2162,7 +2162,7 @@ fi -PACKAGE_DATE="June 2023" +PACKAGE_DATE="October 2023" @@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by onedrive $as_me v2.4.25, which was +This file was extended by onedrive $as_me v2.5.0-alpha-2, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -3212,7 +3212,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -onedrive config.status v2.4.25 +onedrive config.status v2.5.0-alpha-2 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 9c2c0db26..65c7abf59 100644 --- a/configure.ac +++ b/configure.ac @@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure) dnl - tag the release AC_PREREQ([2.69]) -AC_INIT([onedrive],[v2.4.25], [https://github.com/abraunegg/onedrive], [onedrive]) +AC_INIT([onedrive],[v2.5.0-alpha-2], [https://github.com/abraunegg/onedrive], [onedrive]) AC_CONFIG_SRCDIR([src/main.d]) diff --git a/docs/BusinessSharedFolders.md b/docs/BusinessSharedFolders.md deleted file mode 100644 index 3f0429434..000000000 --- a/docs/BusinessSharedFolders.md +++ /dev/null @@ -1,192 +0,0 @@ -# How to configure OneDrive Business Shared Folder Sync -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. 
Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Process Overview -Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: -1. List available shared folders to determine which folder you wish to sync & to validate that you have access to that folder -2. Create a new file called 'business_shared_folders' in your config directory which contains a list of the shared folders you wish to sync -3. Test the configuration using '--dry-run' -4. Sync the OneDrive Business Shared folders as required - -## Listing available OneDrive Business Shared Folders -List the available OneDrive Business Shared folders with the following command: -```text -onedrive --list-shared-folders -``` - This will return a listing of all OneDrive Business Shared folders which have been shared with you and by whom. This is important for conflict resolution: -```text -Initializing the Synchronization Engine ... - -Listing available OneDrive Business Shared Folders: ---------------------------------------- -Shared Folder: SharedFolder0 -Shared By: Firstname Lastname ---------------------------------------- -Shared Folder: SharedFolder1 -Shared By: Firstname Lastname ---------------------------------------- -Shared Folder: SharedFolder2 -Shared By: Firstname Lastname ---------------------------------------- -Shared Folder: SharedFolder0 -Shared By: Firstname Lastname (user@domain) ---------------------------------------- -Shared Folder: SharedFolder1 -Shared By: Firstname Lastname (user@domain) ---------------------------------------- -Shared Folder: SharedFolder2 -Shared By: Firstname Lastname (user@domain) -... -``` - -## Configuring OneDrive Business Shared Folders -1. Create a new file called 'business_shared_folders' in your config directory -2. 
On each new line, list the OneDrive Business Shared Folder you wish to sync -```text -[alex@centos7full onedrive]$ cat ~/.config/onedrive/business_shared_folders -# comment -Child Shared Folder -# Another comment -Top Level to Share -[alex@centos7full onedrive]$ -``` -3. Validate your configuration with `onedrive --display-config`: -```text -Configuration file successfully loaded -onedrive version = v2.4.3 -Config path = /home/alex/.config/onedrive-business/ -Config file found in config path = true -Config option 'check_nosync' = false -Config option 'sync_dir' = /home/alex/OneDriveBusiness -Config option 'skip_dir' = -Config option 'skip_file' = ~*|.~*|*.tmp -Config option 'skip_dotfiles' = false -Config option 'skip_symlinks' = false -Config option 'monitor_interval' = 300 -Config option 'min_notify_changes' = 5 -Config option 'log_dir' = /var/log/onedrive/ -Config option 'classify_as_big_delete' = 1000 -Config option 'sync_root_files' = false -Selective sync 'sync_list' configured = false -Business Shared Folders configured = true -business_shared_folders contents: -# comment -Child Shared Folder -# Another comment -Top Level to Share -``` - -## Performing a sync of OneDrive Business Shared Folders -Perform a standalone sync using the following command: `onedrive --synchronize --sync-shared-folders --verbose`: -```text -onedrive --synchronize --sync-shared-folders --verbose -Using 'user' Config Dir: /home/alex/.config/onedrive-business/ -Using 'system' Config Dir: -Configuration file successfully loaded -Initializing the OneDrive API ... -Configuring Global Azure AD Endpoints -Opening the item database ... 
-All operations will be performed in: /home/alex/OneDriveBusiness -Application version: v2.4.3 -Account Type: business -Default Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA -Default Root ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ -Remaining Free Space: 1098316220277 -Fetching details for OneDrive Root -OneDrive Root exists in the database -Initializing the Synchronization Engine ... -Syncing changes from OneDrive ... -Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ -Number of items from OneDrive to process: 0 -Attempting to sync OneDrive Business Shared Folders -Syncing this OneDrive Business Shared Folder: Child Shared Folder -OneDrive Business Shared Folder - Shared By: test user -Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared -Processing 11 OneDrive items to ensure consistent local state -Syncing this OneDrive Business Shared Folder: Top Level to Share -OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com) -Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 3 OneDrive items for processing from /Top Level to Share/10-Files -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG -Adding 2 OneDrive 
items for processing from /Top Level to Share/10-Files/SMPP -Processing 31 OneDrive items to ensure consistent local state -Uploading differences of ~/OneDriveBusiness -Processing root -The directory has not changed -Processing SMPP_Local -The directory has not changed -Processing SMPP-IF-SPEC_v3_3-24858.pdf -The file has not changed -Processing SMPP_v3_4_Issue1_2-24857.pdf -The file has not changed -Processing new_local_file.txt -The file has not changed -Processing root -The directory has not changed -... -The directory has not changed -Processing week02-03-Combinational_Logic-v1.pptx -The file has not changed -Uploading new items of ~/OneDriveBusiness -Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ -Number of items from OneDrive to process: 0 -Attempting to sync OneDrive Business Shared Folders -Syncing this OneDrive Business Shared Folder: Child Shared Folder -OneDrive Business Shared Folder - Shared By: test user -Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared -Processing 11 OneDrive items to ensure consistent local state -Syncing this OneDrive Business Shared Folder: Top Level to Share -OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com) -Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 3 OneDrive items for processing from /Top Level to Share/10-Files -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Top Level to 
Share/10-Files/Images -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP -Processing 31 OneDrive items to ensure consistent local state -``` - -**Note:** Whenever you modify the `business_shared_folders` file you must perform a `--resync` of your database to clean up stale entries due to changes in your configuration. - -## Enable / Disable syncing of OneDrive Business Shared Folders -Performing a sync of the configured OneDrive Business Shared Folders can be enabled / disabled via adding the following to your configuration file. - -### Enable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_folders = "true" -``` - -### Disable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_folders = "false" -``` - -## Known Issues -Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. - -Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: - -![shared_with_me](./images/shared_with_me.JPG) - -This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/USAGE.md b/docs/USAGE.md deleted file mode 100644 index 235b15d3e..000000000 --- a/docs/USAGE.md +++ /dev/null @@ -1,1469 +0,0 @@ -# Configuration and Usage of the OneDrive Free Client -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. 
Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Table of Contents -- [Using the client](#using-the-client) - * [Upgrading from 'skilion' client](#upgrading-from-skilion-client) - * [Local File and Folder Naming Conventions](#local-file-and-folder-naming-conventions) - * [curl compatibility](#curl-compatibility) - * [Authorize the application with your OneDrive Account](#authorize-the-application-with-your-onedrive-account) - * [Show your configuration](#show-your-configuration) - * [Testing your configuration](#testing-your-configuration) - * [Performing a sync](#performing-a-sync) - * [Performing a single directory sync](#performing-a-single-directory-sync) - * [Performing a 'one-way' download sync](#performing-a-one-way-download-sync) - * [Performing a 'one-way' upload sync](#performing-a-one-way-upload-sync) - * [Performing a selective sync via 'sync_list' file](#performing-a-selective-sync-via-sync_list-file) - * [Performing a --resync](#performing-a---resync) - * [Performing a --force-sync without a --resync or changing your configuration](#performing-a---force-sync-without-a---resync-or-changing-your-configuration) - * [Increasing logging level](#increasing-logging-level) - * [Client Activity Log](#client-activity-log) - * [Notifications](#notifications) - * [Handling a OneDrive account password change](#handling-a-onedrive-account-password-change) -- [Configuration](#configuration) - * [The default configuration](#the-default-configuration-file-is-listed-below) - * ['config' file configuration examples](#config-file-configuration-examples) - + [sync_dir](#sync_dir) - + [sync_dir directory and file permissions](#sync_dir-directory-and-file-permissions) - + [skip_dir](#skip_dir) - + [skip_file](#skip_file) - + [skip_dotfiles](#skip_dotfiles) - + [monitor_interval](#monitor_interval) - + [monitor_fullscan_frequency](#monitor_fullscan_frequency) - + 
[monitor_log_frequency](#monitor_log_frequency) - + [min_notify_changes](#min_notify_changes) - + [operation_timeout](#operation_timeout) - + [ip_protocol_version](#ip_protocol_version) - + [classify_as_big_delete](#classify_as_big_delete) - * [Configuring the client for 'single tenant application' use](#configuring-the-client-for-single-tenant-application-use) - * [Configuring the client to use older 'skilion' application identifier](#configuring-the-client-to-use-older-skilion-application-identifier) -- [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions) - * [How to sync only specific or single directory?](#how-to-sync-only-specific-or-single-directory) - * [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing) - * [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing) - * [How to 'skip' dot files and folders from syncing?](#how-to-skip-dot-files-and-folders-from-syncing) - * [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing) - * [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations) - * [How to prevent your local disk from filling up?](#how-to-prevent-your-local-disk-from-filling-up) - * [How are symbolic links handled by the client?](#how-are-symbolic-links-handled-by-the-client) - * [How to sync shared folders (OneDrive Personal)?](#how-to-sync-shared-folders-onedrive-personal) - * [How to sync shared folders (OneDrive Business or Office 365)?](#how-to-sync-shared-folders-onedrive-business-or-office-365) - * [How to sync sharePoint / Office 365 Shared Libraries?](#how-to-sync-sharepoint--office-365-shared-libraries) - * [How to run a user systemd service at boot without user login?](#how-to-run-a-user-systemd-service-at-boot-without-user-login) - * [How to create a shareable 
link?](#how-to-create-a-shareable-link) - * [How to sync both Personal and Business accounts at the same time?](#how-to-sync-both-personal-and-business-accounts-at-the-same-time) - * [How to sync multiple SharePoint Libraries at the same time?](#how-to-sync-multiple-sharepoint-libraries-at-the-same-time) -- [Running 'onedrive' in 'monitor' mode](#running-onedrive-in-monitor-mode) - * [Use webhook to subscribe to remote updates in 'monitor' mode](#use-webhook-to-subscribe-to-remote-updates-in-monitor-mode) - * [More webhook configuration options](#more-webhook-configuration-options) - + [webhook_listening_host and webhook_listening_port](#webhook_listening_host-and-webhook_listening_port) - + [webhook_expiration_interval and webhook_renewal_interval](#webhook_expiration_interval-and-webhook_renewal_interval) -- [Running 'onedrive' as a system service](#running-onedrive-as-a-system-service) - * [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd) - * [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora) - * [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux) - * [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions) - * [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora) - * [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void) -- [Additional 
Configuration](#additional-configuration) - * [Advanced Configuration of the OneDrive Free Client](#advanced-configuration-of-the-onedrive-free-client) - * [Access OneDrive service through a proxy](#access-onedrive-service-through-a-proxy) - * [Setup selinux for a sync folder outside of the home folder](#setup-selinux-for-a-sync-folder-outside-of-the-home-folder) -- [All available commands](#all-available-commands) - -## Using the client -### Upgrading from 'skilion' client -The 'skilion' version contains a significant number of defects in how the local sync state is managed. When upgrading from the 'skilion' version to this version, it is advisable to stop any service / onedrive process from running and then remove any `items.sqlite3` file from your configuration directory (`~/.config/onedrive/`) as this will force the creation of a new local cache file. - -Additionally, if you are using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as per below: - -**Invalid configuration:** -```text -skip_file = ".*|~*" -``` -**Minimum valid configuration:** -```text -skip_file = "~*" -``` -**Default valid configuration:** -```text -skip_file = "~*|.~*|*.tmp" -``` - -Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process. - -### Local File and Folder Naming Conventions -The files and directories in the synchronization directory must follow the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file). -The application will attempt to handle instances where you have two files with the same names but with different capitalization. Where there is a namespace clash, the file name which clashes will not be synced. This is expected behavior and won't be fixed. - -### curl compatibility -If your system utilises curl < 7.47.0, curl defaults to HTTP/1.1 for HTTPS operations. The client will use HTTP/1.1. 
- -If your system utilises curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS but will stick to HTTP/1.1 by default. The client will use HTTP/1.1 for HTTPS operations. - -If your system utilises curl >= 7.62.0, curl defaults to prefer HTTP/2 over HTTP/1.1 by default. The client will utilse HTTP/2 for most HTTPS operations and HTTP/1.1 for others. This difference is governed by the OneDrive platform and not this client. - -If you wish to explicitly use HTTP/1.1 you will need to use the `--force-http-11` flag or set the config option `force_http_11 = "true"` to force the application to use HTTP/1.1 otherwise all client operations will use whatever is the curl default for your distribution. - -### Authorize the application with your OneDrive Account -After installing the application you must authorize the application with your OneDrive Account. This is done by running the application without any additional command switches. - -Note that some companies require to explicitly add this app in [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an (approved) app to your apps, click on the ellipsis in the top-right corner and choose "Request new apps". On the next page you can add this app. If its not listed, you should request through your IT department. - -You will be asked to open a specific URL by using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving permission to the application, you will be redirected to a blank page. Copy the URI of the blank page into the application. -```text -[user@hostname ~]$ onedrive - -Authorize this app visiting: - -https://..... 
- -Enter the response uri: - -``` - -**Example:** -``` -[user@hostname ~]$ onedrive -Authorize this app visiting: - -https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient - -Enter the response uri: https://login.microsoftonline.com/common/oauth2/nativeclient?code= - -Application has been successfully authorised, however no additional command switches were provided. - -Please use 'onedrive --help' for further assistance in regards to running this application. -``` - -### Show your configuration -To validate your configuration the application will use, utilize the following: -```text -onedrive --display-config -``` -This will display all the pertinent runtime interpretation of the options and configuration you are using. Example output is as follows: -```text -Configuration file successfully loaded -onedrive version = vX.Y.Z-A-bcdefghi -Config path = /home/alex/.config/onedrive -Config file found in config path = true -Config option 'sync_dir' = /home/alex/OneDrive -Config option 'enable_logging' = false -... -Selective sync 'sync_list' configured = false -Config option 'sync_business_shared_folders' = false -Business Shared Folders configured = false -Config option 'webhook_enabled' = false -``` - -### Testing your configuration -You are able to test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded or removed, however the application will display what 'would' have occurred. For example: -```text -onedrive --synchronize --verbose --dry-run -DRY-RUN Configured. Output below shows what 'would' have occurred. -Loading config ... -Using Config Dir: /home/user/.config/onedrive -Initializing the OneDrive API ... -Opening the item database ... 
-All operations will be performed in: /home/user/OneDrive -Initializing the Synchronization Engine ... -Account Type: personal -Default Drive ID: -Default Root ID: -Remaining Free Space: 5368709120 -Fetching details for OneDrive Root -OneDrive Root exists in the database -Syncing changes from OneDrive ... -Applying changes of Path ID: -Uploading differences of . -Processing root -The directory has not changed -Uploading new items of . -OneDrive Client requested to create remote path: ./newdir -The requested directory to create was not found on OneDrive - creating remote directory: ./newdir -Successfully created the remote directory ./newdir on OneDrive -Uploading new file ./newdir/newfile.txt ... done. -Remaining free space: 5368709076 -Applying changes of Path ID: -``` - -**Note:** `--dry-run` can only be used with `--synchronize`. It cannot be used with `--monitor` and will be ignored. - -### Performing a sync -By default all files are downloaded in `~/OneDrive`. After authorizing the application, a sync of your data can be performed by running: -```text -onedrive --synchronize -``` -This will synchronize files from your OneDrive account to your `~/OneDrive` local directory. - -If you prefer to use your local files as stored in `~/OneDrive` as the 'source of truth' use the following sync command: -```text -onedrive --synchronize --local-first -``` - -### Performing a single directory sync -In some cases it may be desirable to sync a single directory under ~/OneDrive without having to change your client configuration. To do this use the following command: -```text -onedrive --synchronize --single-directory '' -``` - -Example: If the full path is `~/OneDrive/mydir`, the command would be `onedrive --synchronize --single-directory 'mydir'` - -### Performing a 'one-way' download sync -In some cases it may be desirable to 'download only' from OneDrive. 
To do this use the following command: -```text -onedrive --synchronize --download-only -``` - -### Performing a 'one-way' upload sync -In some cases it may be desirable to 'upload only' to OneDrive. To do this use the following command: -```text -onedrive --synchronize --upload-only -``` -**Note:** If a file or folder is present on OneDrive, that was previously synced and now does not exist locally, that item it will be removed from OneDrive. If the data on OneDrive should be kept, the following should be used: -```text -onedrive --synchronize --upload-only --no-remote-delete -``` -**Note:** The operation of 'upload only' does not request data from OneDrive about what 'other' data exists online. The client only knows about the data that 'this' client uploaded, thus any files or folders created or uploaded outside of this client will remain untouched online. - -### Performing a selective sync via 'sync_list' file -Selective sync allows you to sync only specific files and directories. -To enable selective sync create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`). - -Important points to understand before using 'sync_list'. -* 'sync_list' excludes _everything_ by default on onedrive. -* 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**. -* Order exclusions before inclusions, so that anything _specifically included_ is included. -* How and where you place your `/` matters for excludes and includes in sub directories. - -Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations. - -Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns. 
-Here is an example of `sync_list`: -```text -# sync_list supports comments -# -# The ordering of entries is highly recommended - exclusions before inclusions -# -# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in Onedrive -!Documents/temp* -# -# Exclude secret data folder in root directory only -!/Secret_data/* -# -# Include everything else in root directory -/* -# -# Include my Backup folder(s) or file(s) anywhere on Onedrive -Backup -# -# Include my Backup folder in root -/Backup/ -# -# Include Documents folder(s) anywhere in Onedrive -Documents/ -# -# Include all PDF files in Documents folder(s), anywhere in Onedrive -Documents/*.pdf -# -# Include this single document in Documents folder(s), anywhere in Onedrive -Documents/latest_report.docx -# -# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in Onedrive -Work/Project* -# -# Include all "notes.txt" files, anywhere in Onedrive -notes.txt -# -# Include /Blender in the ~Onedrive root but not if elsewhere in Onedrive -/Blender -# -# Include these directories(or files) in 'Pictures' folder(s), that have a space in their name -Pictures/Camera Roll -Pictures/Saved Pictures -# -# Include these names if they match any file or folder -Cinema Soc -Codes -Textbooks -Year 2 -``` -The following are supported for pattern matching and exclusion rules: -* Use the `*` to wildcard select any characters to match for the item to be included -* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item - - -**Note:** When enabling the use of 'sync_list' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement. 
- 
-**Note:** After changing the sync_list, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
-
-**Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file:
-```text
-sync_root_files = "true"
-```
-This will tell the application to sync any file that it finds in your 'sync_dir' root by default.
-
-### Performing a --resync
-If you modify any of the following configuration items, you will be required to perform a `--resync` to ensure your client is syncing your data with the updated configuration:
-* sync_dir
-* skip_dir
-* skip_file
-* drive_id
-* Modifying sync_list
-* Modifying business_shared_folders
-
-Additionally, you may choose to perform a `--resync` if you feel that this action needs to be taken to ensure your data is in sync. If you are using this switch simply because you don't know the sync status, you can query the actual sync status using `--display-sync-status`.
-
-When using `--resync`, the following warning and advice will be presented:
-```text
-The use of --resync will remove your local 'onedrive' client state, thus no record will exist regarding your current 'sync status'
-This has the potential to overwrite local versions of files with potentially older versions downloaded from OneDrive which can lead to data loss
-If in-doubt, backup your local data first before proceeding with --resync
-
-Are you sure you wish to proceed with --resync? [Y/N] 
-```
-
-To proceed with using `--resync`, you must type 'y' or 'Y' to allow the application to continue.
-
-**Note:** It is highly recommended to only use `--resync` if the application advises you to use it. 
Do not just blindly set the application to start with `--resync` as the default option. - -**Note:** In some automated environments (and it is 100% assumed you *know* what you are doing because of automation), in order to avoid this 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt. - -### Performing a --force-sync without a --resync or changing your configuration -In some cases and situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice. - -The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync` - -In order to use this option, you must run the application manually in the following manner: -```text -onedrive --synchronize --single-directory '' --force-sync -``` - -When using `--force-sync`, the following warning and advice will be presented: -```text -WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synchronize --single-directory --force-sync being used - -The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts. -By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync. - -Are you sure you wish to proceed with --force-sync [Y/N] -``` - -To proceed with using `--force-sync`, you must type 'y' or 'Y' to allow the application to continue. - -### Increasing logging level -When running a sync it may be desirable to see additional information as to the progress and operation of the client. 
To do this, use the following command: -```text -onedrive --synchronize --verbose -``` - -### Client Activity Log -When running onedrive all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` - -**Note:** You will need to ensure the existence of this directory, and that your user has the applicable permissions to write to this directory or the following warning will be printed: -```text -Unable to access /var/log/onedrive/ -Please manually create '/var/log/onedrive/' and set appropriate permissions to allow write access -The requested client activity log will instead be located in the users home directory -``` - -On many systems this can be achieved by -```text -sudo mkdir /var/log/onedrive -sudo chown root:users /var/log/onedrive -sudo chmod 0775 /var/log/onedrive -``` - -All log files will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client. - -Additionally, you need to ensure that your user account is part of the 'users' group: -``` -cat /etc/group | grep users -``` - -If your user is not part of this group, then you need to add your user to this group: -``` -sudo usermod -a -G users -``` - -You then need to 'logout' of all sessions / SSH sessions to login again to have the new group access applied. - - -**Note:** -To use a different log directory rather than the default above, add the following as a configuration option to `~/.config/onedrive/config`: -```text -log_dir = "/path/to/location/" -``` -Trailing slash required - -An example of the log file is below: -```text -2018-Apr-07 17:09:32.1162837 Loading config ... -2018-Apr-07 17:09:32.1167908 No config file found, using defaults -2018-Apr-07 17:09:32.1170626 Initializing the OneDrive API ... -2018-Apr-07 17:09:32.5359143 Opening the item database ... 
-2018-Apr-07 17:09:32.5515295 All operations will be performed in: /root/OneDrive -2018-Apr-07 17:09:32.5518387 Initializing the Synchronization Engine ... -2018-Apr-07 17:09:36.6701351 Applying changes of Path ID: -2018-Apr-07 17:09:37.4434282 Adding OneDrive Root to the local database -2018-Apr-07 17:09:37.4478342 The item is already present -2018-Apr-07 17:09:37.4513752 The item is already present -2018-Apr-07 17:09:37.4550062 The item is already present -2018-Apr-07 17:09:37.4586444 The item is already present -2018-Apr-07 17:09:37.7663571 Adding OneDrive Root to the local database -2018-Apr-07 17:09:37.7739451 Fetching details for OneDrive Root -2018-Apr-07 17:09:38.0211861 OneDrive Root exists in the database -2018-Apr-07 17:09:38.0215375 Uploading differences of . -2018-Apr-07 17:09:38.0220464 Processing -2018-Apr-07 17:09:38.0224884 The directory has not changed -2018-Apr-07 17:09:38.0229369 Processing -2018-Apr-07 17:09:38.02338 The directory has not changed -2018-Apr-07 17:09:38.0237678 Processing -2018-Apr-07 17:09:38.0242285 The directory has not changed -2018-Apr-07 17:09:38.0245977 Processing -2018-Apr-07 17:09:38.0250788 The directory has not changed -2018-Apr-07 17:09:38.0254657 Processing -2018-Apr-07 17:09:38.0259923 The directory has not changed -2018-Apr-07 17:09:38.0263547 Uploading new items of . 
-2018-Apr-07 17:09:38.5708652 Applying changes of Path ID: -``` - -### Notifications -If notification support is compiled in, the following events will trigger a notification within the display manager session: -* Aborting a sync if .nosync file is found -* Cannot create remote directory -* Cannot upload file changes -* Cannot delete remote file / folder -* Cannot move remote file / folder - - -### Handling a OneDrive account password change -If you change your OneDrive account password, the client will no longer be authorised to sync, and will generate the following error: -```text -ERROR: OneDrive returned a 'HTTP 401 Unauthorized' - Cannot Initialize Sync Engine -``` -To re-authorise the client, follow the steps below: -1. If running the client as a service (init.d or systemd), stop the service -2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. -3. Restart the client if running as a service or perform a manual sync - -The application will now sync with OneDrive with the new credentials. - -## Configuration - -Configuration is determined by three layers: the default values, values set in the configuration file, and values passed in via the command line. The default values provide a reasonable default, and configuration is optional. - -Most command line options have a respective configuration file setting. - -If you want to change the defaults, you can copy and edit the included config file into your configuration directory. Valid default directories for the config file are: -* `~/.config/onedrive` -* `/etc/onedrive` - -**Example:** -```text -mkdir -p ~/.config/onedrive -wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config -nano ~/.config/onedrive/config -``` -This file does not get created by default, and should only be created if you want to change the 'default' operational parameters. 
- -See the [config](https://raw.githubusercontent.com/abraunegg/onedrive/master/config) file for the full list of options, and [All available commands](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#all-available-commands) for all possible keys and their default values. - -**Note:** The location of the application configuration information can also be specified by using the `--confdir` configuration option which can be passed in at client run-time. - -### The default configuration file is listed below: -```text -# Configuration for OneDrive Linux Client -# This file contains the list of supported configuration fields -# with their default values. -# All values need to be enclosed in quotes -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -# sync_dir = "~/OneDrive" -# skip_file = "~*|.~*|*.tmp" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" -# drive_id = "" -# upload_only = "false" -# check_nomount = "false" -# check_nosync = "false" -# download_only = "false" -# disable_notifications = "false" -# disable_upload_validation = "false" -# enable_logging = "false" -# force_http_11 = "false" -# local_first = "false" -# no_remote_delete = "false" -# skip_symlinks = "false" -# debug_https = "false" -# skip_dotfiles = "false" -# skip_size = "1000" -# dry_run = "false" -# min_notify_changes = "5" -# monitor_log_frequency = "6" -# monitor_fullscan_frequency = "12" -# sync_root_files = "false" -# classify_as_big_delete = "1000" -# user_agent = "" -# remove_source_files = "false" -# skip_dir_strict_match = "false" -# application_id = "" -# resync = "false" -# resync_auth = "false" -# bypass_data_preservation = "false" -# azure_ad_endpoint = "" -# azure_tenant_id = "common" -# sync_business_shared_folders = "false" -# sync_dir_permissions = "700" -# sync_file_permissions = "600" -# rate_limit = "131072" -# 
webhook_enabled = "false" -# webhook_public_url = "" -# webhook_listening_host = "" -# webhook_listening_port = "8888" -# webhook_expiration_interval = "86400" -# webhook_renewal_interval = "43200" -# space_reservation = "50" -# display_running_config = "false" -# read_only_auth_scope = "false" -# cleanup_local_files = "false" -# operation_timeout = "3600" -# dns_timeout = "60" -# connect_timeout = "10" -# data_timeout = "600" -# ip_protocol_version = "0" -``` - -### 'config' file configuration examples: -The below are 'config' file examples to assist with configuration of the 'config' file: - -#### sync_dir -Configure your local sync directory location. - -Example: -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -sync_dir="~/MyDirToSync" -# skip_file = "~*|.~*|*.tmp" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" -``` -**Please Note:** -Proceed with caution here when changing the default sync dir from `~/OneDrive` to `~/MyDirToSync` - -The issue here is around how the client stores the sync_dir path in the database. If the config file is missing, or you don't use the `--syncdir` parameter - what will happen is the client will default back to `~/OneDrive` and 'think' that either all your data has been deleted - thus delete the content on OneDrive, or will start downloading all data from OneDrive into the default location. - -**Note:** After changing `sync_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` - -**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible. 
Local filesystem changes will be replicated between the local filesystem and OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix. - -#### sync_dir directory and file permissions -The following are directory and file default permissions for any new directory or file that is created: -* Directories: 700 - This provides the following permissions: `drwx------` -* Files: 600 - This provides the following permissions: `-rw-------` - -To change the default permissions, update the following 2 configuration options with the required permissions. Utilise the [Unix Permissions Calculator](https://chmod-calculator.com/) to assist in determining the required permissions. - -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -... -# sync_business_shared_folders = "false" -sync_dir_permissions = "700" -sync_file_permissions = "600" - -``` - -**Important:** Special permission bits (setuid, setgid, sticky bit) are not supported. Valid permission values are from `000` to `777` only. - -#### skip_dir -This option is used to 'skip' certain directories and supports pattern matching. - -Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. - -**Important:** Entries under `skip_dir` are relative to your `sync_dir` path. - -Example: -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. 
-# -# sync_dir = "~/OneDrive" -# skip_file = "~*|.~*|*.tmp" -# monitor_interval = "300" -skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell" -# log_dir = "/var/log/onedrive/" -``` - -**Note:** The `skip_dir` can be specified multiple times, for example: -```text -skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir" -skip_dir = "/Path/To/A/Directory" -skip_dir = "/Another/Path/To/Different/Directory" -``` -This will be interpreted the same as: -```text -skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir|/Path/To/A/Directory|/Another/Path/To/Different/Directory" -``` - -**Note:** After changing `skip_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` - -#### skip_file -This option is used to 'skip' certain files and supports pattern matching. - -Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. - -Files can be skipped in the following fashion: -* Specify a wildcard, eg: '*.txt' (skip all txt files) -* Explicitly specify the filename and it's full path relative to your sync_dir, eg: '/path/to/file/filename.ext' -* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext' - -By default, the following files will be skipped: -* Files that start with ~ -* Files that start with .~ (like .~lock.* files generated by LibreOffice) -* Files that end in .tmp - -**Important:** Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process. - -Example: -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. 
-# -# sync_dir = "~/OneDrive" -skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" -``` - -**Note:** The `skip_file` can be specified multiple times, for example: -```text -skip_file = "~*|.~*|*.tmp|*.swp" -skip_file = "*.blah" -skip_file = "never_sync.file" -skip_file = "/Documents/keepass.kdbx" -``` -This will be interpreted the same as: -```text -skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx" -``` - -**Note:** after changing `skip_file`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` - -#### skip_dotfiles -Setting this to `"true"` will skip all .files and .folders while syncing. - -Example: -```text -# skip_symlinks = "false" -# debug_https = "false" -skip_dotfiles = "true" -# dry_run = "false" -# monitor_interval = "300" -``` - -#### monitor_interval -The monitor interval is defined as the wait time 'between' sync's when running in monitor mode. When this interval expires, the client will check OneDrive for changes online, performing data integrity checks and scanning the local 'sync_dir' for new content. - -By default without configuration, 'monitor_interval' is set to 300 seconds. Setting this value to 600 will run the sync process every 10 minutes. - -Example: -```text -# skip_dotfiles = "false" -# dry_run = "false" -monitor_interval = "600" -# min_notify_changes = "5" -# monitor_log_frequency = "6" -``` -**Note:** It is strongly advised you do not use a value of less than 300 seconds for 'monitor_interval'. Using a value less than 300 means your application will be constantly needlessly checking OneDrive online for changes. Future versions of the application may enforce the checking of this minimum value. 
- 
-#### monitor_fullscan_frequency
-This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency.
-
-By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This is only applicable when running in --monitor mode.
-
-Setting this value to 24 means that the full scan of OneDrive and checking the integrity of the data stored locally will occur every 2 hours (assuming 'monitor_interval' is set to 300 seconds):
-
-Example:
-```text
-# min_notify_changes = "5"
-# monitor_log_frequency = "6"
-monitor_fullscan_frequency = "24"
-# sync_root_files = "false"
-# classify_as_big_delete = "1000"
-```
-
-**Note:** When running in --monitor mode, at application start-up, a full scan will be performed to ensure data integrity. This option has zero effect when running the application in `--synchronize` mode and a full scan will always be performed.
-
-#### monitor_log_frequency
-This configuration option controls the output of when logging is performed to detail that a sync is occurring with OneDrive when using `--monitor` mode. The frequency of syncing with OneDrive is controlled via 'monitor_interval'.
-
-By default without configuration, 'monitor_log_frequency' is set to 6.
-
-By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and performed all the initial processing steps:
-```
-Configuring Global Azure AD Endpoints
-Initializing the Synchronization Engine ...
-Initializing monitor ...
-OneDrive monitor interval (seconds): 300
-Starting a sync with OneDrive
-Syncing changes from OneDrive ...
-Performing a database consistency and integrity check on locally stored data ... 
-Sync with OneDrive is complete -``` -Then, based on 'monitor_log_frequency', the following will be logged when the value is reached: -``` -Starting a sync with OneDrive -Syncing changes from OneDrive ... -Sync with OneDrive is complete -``` -**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occuring which is triggered by 'monitor_fullscan_frequency'. - -#### min_notify_changes -This option defines the minimum number of pending incoming changes necessary to trigger a desktop notification. This allows controlling the frequency of notifications. - -Example: -```text -# dry_run = "false" -# monitor_interval = "300" -min_notify_changes = "50" -# monitor_log_frequency = "6" -# monitor_fullscan_frequency = "12" -``` - -#### operation_timeout -Operation Timeout is the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. - -Example: -```text -# sync_file_permissions = "600" -# rate_limit = "131072" -operation_timeout = "3600" -``` - -#### ip_protocol_version -By default, the application will use IPv4 and IPv6 to resolve and communicate with Microsoft OneDrive. In some Linux distributions (most notably Ubuntu and those distributions based on Ubuntu) this will cause problems due to how DNS resolution is being performed. - -To configure the application to use a specific IP version, configure the following in your config file: -```text -# operation_timeout = "3600" -# dns_timeout = "60" -# connect_timeout = "10" -# data_timeout = "600" -ip_protocol_version = "1" - -``` -**Note:** -* A value of 0 will mean the client will use IPv4 and IPv6. This is the default. -* A value of 1 will mean the client will use IPv4 only. -* A value of 2 will mean the client will use IPv6 only. 
- -#### classify_as_big_delete -This configuration option will help prevent the online deletion of files and folders online, when the directory that has been deleted contains more items than the specified value. - -By default, this value is 1000 which will count files and folders as children of the directory that has been deleted. - -To change this value, configure the following in your config file: -```text -# monitor_fullscan_frequency = "12" -# sync_root_files = "false" -classify_as_big_delete = "3000" -# user_agent = "" -# remove_source_files = "false" -``` - -**Note:** -* This option only looks at Directories. It has zero effect on deleting files located in your 'sync_dir' root -* This option (in v2.4.x and below) only gets activated when using `--monitor`. In `--synchronize` mode it is ignored as it is assumed you performed that desired operation before you started your next manual sync with OneDrive. -* Be sensible with setting this value - do not use a low value such as '1' as this will prevent you from syncing your data each and every time you delete a single file. - - -#### Configuring the client for 'single tenant application' use -In some instances when using OneDrive Business Accounts, depending on the Azure organisational configuration, it will be necessary to configure the client as a 'single tenant application'. -To configure this, after creating the application on your Azure tenant, update the 'config' file with the tenant name (not the GUID) and the newly created Application ID, then this will be used for the authentication process. 
- 
-```text
-# skip_dir_strict_match = "false"
-application_id = "your.application.id.guid"
-# resync = "false"
-# bypass_data_preservation = "false"
-# azure_ad_endpoint = "xxxxxx"
-azure_tenant_id = "your.azure.tenant.name"
-# sync_business_shared_folders = "false"
-```
-
-#### Configuring the client to use older 'skilion' application identifier
-In some instances it may be desirable to utilise the older 'skilion' application identifier to avoid authorising a new application ID within Microsoft Azure environments.
-To configure this, update the 'config' file with the old Application ID, then this will be used for the authentication process.
-```text
-# skip_dir_strict_match = "false"
-application_id = "22c49a0d-d21c-4792-aed1-8f163c982546"
-# resync = "false"
-# bypass_data_preservation = "false"
-```
-**Note:** The application will now use the older 'skilion' client identifier, however this may increase your chances of getting a OneDrive 429 error.
-
-**Note:** After changing the 'application_id' you will need to restart any 'onedrive' process you have running, and potentially issue a `--reauth` to re-authenticate the client with this updated application ID.
-
-## Frequently Asked Configuration Questions
-
-### How to sync only specific or single directory?
-There are two methods to achieve this:
-* Utilise '--single-directory' option to only sync this specific path
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-
-### How to 'skip' directories from syncing?
-There are several mechanisms available to 'skip' a directory from the sync process:
-* Utilise 'skip_dir' to configure what directories to skip. Refer to above for configuration advice.
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-
-One further method is to add a '.nosync' empty file to any folder. 
When this file is present, adding `--check-for-nosync` to your command line will now make the sync process skip any folder where the '.nosync' file is present.
-
-To make this a permanent change to always skip folders when a '.nosync' empty file is present, add the following to your config file:
-
-Example:
-```text
-# upload_only = "false"
-# check_nomount = "false"
-check_nosync = "true"
-# download_only = "false"
-# disable_notifications = "false"
-```
-**Default:** False
-
-### How to 'skip' files from syncing?
-There are two methods to achieve this:
-* Utilise 'skip_file' to configure what files to skip. Refer to above for configuration advice.
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-
-### How to 'skip' dot files and folders from syncing?
-There are three methods to achieve this:
-* Utilise 'skip_file' or 'skip_dir' to configure what files or folders to skip. Refer to above for configuration advice.
-* Utilise 'sync_list' to configure what files and directories to sync, and what should be excluded
-* Utilise 'skip_dotfiles' to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive.
-
-Example:
-```text
-# skip_symlinks = "false"
-# debug_https = "false"
-skip_dotfiles = "true"
-# skip_size = "1000"
-# dry_run = "false"
-```
-**Default:** False
-
-### How to 'skip' files larger than a certain size from syncing?
-There are two methods to achieve this:
-* Use `--skip-size ARG` as part of a CLI command to skip new files larger than this size (in MB)
-* Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped
-
-### How to 'rate limit' the application to control bandwidth consumed for upload & download operations?
-To minimise the Internet bandwidth for upload and download operations, you can configure the 'rate_limit' option within the config file. 
- -Example valid values for this are as follows: -* 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts -* 262144 = 256 KB/s -* 524288 = 512 KB/s -* 1048576 = 1 MB/s -* 10485760 = 10 MB/s -* 104857600 = 100 MB/s - -Example: -```text -# sync_business_shared_folders = "false" -# sync_dir_permissions = "700" -# sync_file_permissions = "600" -rate_limit = "131072" -``` - -**Note:** A number greater than '131072' is a valid value, with '104857600' being tested as an upper limit. - -### How to prevent your local disk from filling up? -By default, the application will reserve 50MB of disk space to prevent your filesystem to run out of disk space. This value can be modified by adding the following to your config file: - -Example: -```text -... -# webhook_expiration_interval = "86400" -# webhook_renewal_interval = "43200" -space_reservation = "10" -``` - -The value entered is in MB (Mega Bytes). In this example, a value of 10MB is being used, and will be converted to bytes by the application. The value being used can be reviewed when using `--display-config`: -``` -Config option 'sync_dir_permissions' = 700 -Config option 'sync_file_permissions' = 600 -Config option 'space_reservation' = 10485760 -Config option 'application_id' = -Config option 'azure_ad_endpoint' = -Config option 'azure_tenant_id' = common -``` - -Any value is valid here, however, if you use a value of '0' a value of '1' will actually be used, so that you actually do not run out of disk space. - -### How are symbolic links handled by the client? -Microsoft OneDrive has zero concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. - -As such, there are only two methods to support symbolic links with this client: -1. 
Follow the Linux symbolic link and upload whatever the link is pointing at to OneDrive. This is the default behaviour.
-2. Skip symbolic links by configuring the application to do so. In skipping, no data, no link, no reference is uploaded to OneDrive.
-
-To skip symbolic links, edit your configuration as per below:
-
-```text
-# local_first = "false"
-# no_remote_delete = "false"
-skip_symlinks = "true"
-# debug_https = "false"
-# skip_dotfiles = "false"
-```
-Setting this to `"true"` will configure the client to skip all symbolic links while syncing.
-
-The default setting is `"false"` which will sync the whole folder structure referenced by the symbolic link, duplicating the contents on OneDrive in the place where the symbolic link is.
-
-### How to sync shared folders (OneDrive Personal)?
-Folders shared with you can be synced by adding them to your OneDrive. To do that open your Onedrive, go to the Shared files list, right click on the folder you want to sync and then click on "Add to my OneDrive".
-
-### How to sync shared folders (OneDrive Business or Office 365)?
-Refer to [./BusinessSharedFolders.md](BusinessSharedFolders.md) for configuration assistance.
-
-Do not use the 'Add shortcut to My files' from the OneDrive web based interface to add a 'shortcut' to your shared folder. This shortcut is not supported by the OneDrive API, thus it cannot be used.
-
-### How to sync SharePoint / Office 365 Shared Libraries?
-Refer to [./SharePoint-Shared-Libraries.md](SharePoint-Shared-Libraries.md) for configuration assistance.
-
-### How to run a user systemd service at boot without user login?
-In some cases it may be desirable for the systemd service to start without having to login as your 'user'
-
-To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will startup without you having to login to your system:
-```text
-loginctl enable-linger
-```
-
-### How to create a shareable link? 
-In some cases it may be desirable to create a shareable file link and give this link to other users to access a specific file. - -To do this, use the following command: -```text -onedrive --create-share-link -``` -**Note:** By default this will be a read-only link. - -To make this a read-write link, use the following command: -```text -onedrive --create-share-link --with-editing-perms -``` -**Note:** The ordering of the option file path and option flag is important. - -### How to sync both Personal and Business accounts at the same time? -You must configure separate instances of the application configuration for each account. - -Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance. - -### How to sync multiple SharePoint Libraries at the same time? -You must configure a separate instances of the application configuration for each SharePoint Library. - -Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance. - -## Running 'onedrive' in 'monitor' mode -Monitor mode (`--monitor`) allows the onedrive process to continually monitor your local file system for changes to files. 
- -Two common errors can occur when using monitor mode: -* Intialisation failure -* Unable to add a new inotify watch - -Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low: -* `fs.file-max` -* `fs.inotify.max_user_watches` - -To determine what the existing values are on your system use the following commands: -```text -sysctl fs.file-max -sysctl fs.inotify.max_user_watches -``` - -To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir': -```text -cd /path/to/your/sync/dir -ls -laR | wc -l -``` - -To make a change to these variables using your file and folder count: -``` -sudo sysctl fs.file-max= -sudo sysctl fs.inotify.max_user_watches= -``` - -To make these changes permanent, refer to your OS reference documentation. - -### Use webhook to subscribe to remote updates in 'monitor' mode - -A webhook can be optionally enabled in the monitor mode to allow the onedrive process to subscribe to remote updates. Remote changes can be synced to your local file system as soon as possible, without waiting for the next sync cycle. - -To enable this feature, you need to configure the following options in the config file: - -```text -webhook_enabled = "true" -webhook_public_url = "" -``` - -Setting `webhook_enabled` to `true` enables the webhook in 'monitor' mode. The onedrive process will listen for incoming updates at a configurable endpoint, which defaults to `0.0.0.0:8888`. The `webhook_public_url` must be set to an public-facing url for Microsoft to send updates to your webhook. If your host is directly exposed to the Internet, the `webhook_public_url` can be set to `http://:8888/` to match the default endpoint. However, the recommended approach is to configure a reverse proxy like nginx. - -**Note:** A valid HTTPS certificate is required for your public-facing URL if using nginx. 
- -For example, below is a nginx config snippet to proxy traffic into the webhook: - -```text -server { - listen 80; - location /webhooks/onedrive { - proxy_http_version 1.1; - proxy_pass http://127.0.0.1:8888; - } -} -``` - -With nginx running, you can configure `webhook_public_url` to `https:///webhooks/onedrive`. - -If you receive this application error: -```text -Subscription validation request failed. Response must exactly match validationToken query parameter. -``` -The most likely cause for this error will be your nginx configuration. To resolve, potentially investigate the following configuration for nginx: - -```text -server { - listen 80; - location /webhooks/onedrive { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Original-Request-URI $request_uri; - proxy_read_timeout 300s; - proxy_connect_timeout 75s; - proxy_buffering off; - proxy_http_version 1.1; - proxy_pass http://127.0.0.1:8888; - } -} -``` - -For any further nginx configuration assistance, please refer to: https://docs.nginx.com/ - -### More webhook configuration options - -Below options can be optionally configured. The default is usually good enough. - -#### webhook_listening_host and webhook_listening_port - -Set `webhook_listening_host` and `webhook_listening_port` to change the webhook listening endpoint. If `webhook_listening_host` is left empty, which is the default, the webhook will bind to `0.0.0.0`. The default `webhook_listening_port` is `8888`. - -``` -webhook_listening_host = "" -webhook_listening_port = "8888" -``` - -#### webhook_expiration_interval and webhook_renewal_interval - -Set `webhook_expiration_interval` and `webhook_renewal_interval` to change the frequency of subscription renewal. By default, the webhook asks Microsoft to keep subscriptions alive for 24 hours, and it renews subscriptions when it is less than 12 hours before their expiration. 
- -``` -# Default expiration interval is 24 hours -webhook_expiration_interval = "86400" - -# Default renewal interval is 12 hours -webhook_renewal_interval = "43200" -``` - -## Running 'onedrive' as a system service -There are a few ways to use onedrive as a service -* via init.d -* via systemd -* via runit - -**Note:** If using the service files, you may need to increase the `fs.inotify.max_user_watches` value on your system to handle the number of files in the directory you are monitoring as the initial value may be too low. - -### OneDrive service running as root user via init.d -```text -chkconfig onedrive on -service onedrive start -``` -To see the logs run: -```text -tail -f /var/log/onedrive/.onedrive.log -``` -To change what 'user' the client runs under (by default root), manually edit the init.d service file and modify `daemon --user root onedrive_service.sh` for the correct user. - -### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora) -First, su to root using `su - root`, then enable the systemd service: -```text -systemctl --user enable onedrive -systemctl --user start onedrive -``` -**Note:** `systemctl --user` directive is not applicable for Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below. - -**Note:** This will run the 'onedrive' process with a UID/GID of '0', thus, any files or folders that are created will be owned by 'root' - -To view the status of the service running, use the following: -```text -systemctl --user status onedrive.service -``` - -To see the systemd application logs run: -```text -journalctl --user-unit=onedrive -f -``` - -**Note:** It is a 'systemd' requirement that the XDG environment variables exist for correct enablement and operation of systemd services. If you receive this error when enabling the systemd service: -``` -Failed to connect to bus: No such file or directory -``` -The most likely cause is that the XDG environment variables are missing. 
To fix this, you must add the following to `.bashrc` or any other file which is run on user login: -``` -export XDG_RUNTIME_DIR="/run/user/$UID" -export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus" -``` - -To make this change effective, you must logout of all user accounts where this change has been made. - -**Note:** On some systems (for example - Raspbian / Ubuntu / Debian on Raspberry Pi) the above XDG fix may not be reliable after system reboots. The potential alternative to start the client via systemd as root, is to perform the following: -1. Create a symbolic link from `/home/root/.config/onedrive` pointing to `/root/.config/onedrive/` -2. Create a systemd service using the '@' service file: `systemctl enable onedrive@root.service` -3. Start the root@service: `systemctl start onedrive@root.service` - -This will ensure that the service will correctly restart on system reboot. - -To see the systemd application logs run: -```text -journalctl --unit=onedrive@ -f -``` - -### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux) -```text -systemctl enable onedrive -systemctl start onedrive -``` -**Note:** This will run the 'onedrive' process with a UID/GID of '0', thus, any files or folders that are created will be owned by 'root' - -To see the systemd application logs run: -```text -journalctl --unit=onedrive -f -``` - -### OneDrive service running as a non-root user via systemd (All Linux Distributions) -In some cases it is desirable to run the OneDrive client as a service, but not running as the 'root' user. In this case, follow the directions below to configure the service for your normal user login. - -1. As the user, who will be running the service, run the application in standalone mode, authorize the application for use & validate that the synchronization is working as expected: -```text -onedrive --synchronize --verbose -``` -2. 
Once the application is validated and working for your user, as the 'root' user, where is your username from step 1 above. -```text -systemctl enable onedrive@.service -systemctl start onedrive@.service -``` -3. To view the status of the service running for the user, use the following: -```text -systemctl status onedrive@.service -``` - -To see the systemd application logs run: -```text -journalctl --unit=onedrive@ -f -``` - -### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora) -In some cases you may wish to receive GUI notifications when using the client when logged in as a non-root user. In this case, follow the directions below: - -1. Login via graphical UI as user you wish to enable the service for -2. Disable any `onedrive@` service files for your username - eg: -```text -sudo systemctl stop onedrive@alex.service -sudo systemctl disable onedrive@alex.service -``` -3. Enable service as per the following: -```text -systemctl --user enable onedrive -systemctl --user start onedrive -``` - -To view the status of the service running for the user, use the following: -```text -systemctl --user status onedrive.service -``` - -To see the systemd application logs run: -```text -journalctl --user-unit=onedrive -f -``` - -**Note:** `systemctl --user` directive is not applicable for Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - -### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void) - -1. Create the following folder if not present already `/etc/sv/runsvdir-` - - - where `` is the `USER` targeted for the service - - _e.g_ `# mkdir /etc/sv/runsvdir-nolan` - -2. Create a file called `run` under the previously created folder with - executable permissions - - - `# touch /etc/sv/runsvdir-/run` - - `# chmod 0755 /etc/sv/runsvdir-/run` - -3. 
Edit the `run` file with the following contents (priviledges needed) - - ```sh - #!/bin/sh - export USER="" - export HOME="/home/" - - groups="$(id -Gn "${USER}" | tr ' ' ':')" - svdir="${HOME}/service" - - exec chpst -u "${USER}:${groups}" runsvdir "${svdir}" - ``` - - - do not forget to correct the `` according to the `USER` set on - step #1 - -4. Enable the previously created folder as a service - - - `# ln -fs /etc/sv/runsvdir- /var/service/` - -5. Create a subfolder on the `USER`'s `HOME` directory to store the services - (or symlinks) - - - `$ mkdir ~/service` - -6. Create a subfolder for OneDrive specifically - - - `$ mkdir ~/service/onedrive/` - -7. Create a file called `run` under the previously created folder with - executable permissions - - - `$ touch ~/service/onedrive/run` - - `$ chmod 0755 ~/service/onedrive/run` - -8. Append the following contents to the `run` file - - ```sh - #!/usr/bin/env sh - exec /usr/bin/onedrive --monitor - ``` - - - in some scenario the path for the `onedrive` binary might differ, you can - obtain it regardless by running `$ command -v onedrive` - -9. Reboot to apply changes - -10. Check status of user-defined services - - - `$ sv status ~/service/*` - -You may refer to Void's documentation regarding -[Per-User Services](https://docs.voidlinux.org/config/services/user-services.html) -for extra details. - -## Additional Configuration -### Advanced Configuration of the OneDrive Free Client -* Configuring the client to use mulitple OneDrive accounts / configurations, for example: - * Setup to use onedrive with both Personal and Business accounts - * Setup to use onedrive with multiple SharePoint Libraries -* Configuring the client for use in dual-boot (Windows / Linux) situations -* Configuring the client for use when 'sync_dir' is a mounted directory -* Upload data from the local ~/OneDrive folder to a specific location on OneDrive - -Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance. 
- -### Access OneDrive service through a proxy -If you have a requirement to run the client through a proxy, there are a couple of ways to achieve this: -1. Set proxy configuration in `~/.bashrc` to allow the authorization process and when utilizing `--synchronize` -2. If running as a systemd service, edit the applicable systemd service file to include the proxy configuration information: -```text -[Unit] -Description=OneDrive Free Client -Documentation=https://github.com/abraunegg/onedrive -After=network-online.target -Wants=network-online.target - -[Service] -Environment="HTTP_PROXY=http://ip.address:port" -Environment="HTTPS_PROXY=http://ip.address:port" -ExecStart=/usr/local/bin/onedrive --monitor -Restart=on-failure -RestartSec=3 - -[Install] -WantedBy=default.target -``` - -**Note:** After modifying the service files, you will need to run `sudo systemctl daemon-reload` to ensure the service file changes are picked up. A restart of the OneDrive service will also be required to pick up the change to send the traffic via the proxy server - -### Setup selinux for a sync folder outside of the home folder -If selinux is enforced and the sync folder is outside of the home folder, as long as there is no policy for cloud fileservice providers, label the file system folder to user_home_t. 
-```text -sudo semanage fcontext -a -t user_home_t /path/to/onedriveSyncFolder -sudo restorecon -R -v /path/to/onedriveSyncFolder -``` -To remove this change from selinux and restore the default behaivor: -```text -sudo semanage fcontext -d /path/to/onedriveSyncFolder -sudo restorecon -R -v /path/to/onedriveSyncFolder -``` - -## All available commands -Output of `onedrive --help` -```text -OneDrive - a client for OneDrive Cloud Services - -Usage: - onedrive [options] --synchronize - Do a one time synchronization - onedrive [options] --monitor - Monitor filesystem and sync regularly - onedrive [options] --display-config - Display the currently used configuration - onedrive [options] --display-sync-status - Query OneDrive service and report on pending changes - onedrive -h | --help - Show this help screen - onedrive --version - Show version - -Options: - - --auth-files ARG - Perform authorization via two files passed in as ARG in the format `authUrl:responseUrl` - The authorization URL is written to the `authUrl`, then onedrive waits for the file `responseUrl` - to be present, and reads the response from that file. - --auth-response ARG - Perform authentication not via interactive dialog but via providing the response url directly. - --check-for-nomount - Check for the presence of .nosync in the syncdir root. If found, do not perform sync. - --check-for-nosync - Check for the presence of .nosync in each directory. If found, skip directory from sync. - --classify-as-big-delete - Number of children in a path that is locally removed which will be classified as a 'big data delete' - --cleanup-local-files - Cleanup additional local files when using --download-only. This will remove local data. - --confdir ARG - Set the directory used to store the configuration files - --create-directory ARG - Create a directory on OneDrive - no sync will be performed. 
- --create-share-link ARG - Create a shareable link for an existing file on OneDrive - --debug-https - Debug OneDrive HTTPS communication. - --destination-directory ARG - Destination directory for renamed or move on OneDrive - no sync will be performed. - --disable-download-validation - Disable download validation when downloading from OneDrive - --disable-notifications - Do not use desktop notifications in monitor mode. - --disable-upload-validation - Disable upload validation when uploading to OneDrive - --display-config - Display what options the client will use as currently configured - no sync will be performed. - --display-running-config - Display what options the client has been configured to use on application startup. - --display-sync-status - Display the sync status of the client - no sync will be performed. - --download-only - Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive. - --dry-run - Perform a trial sync with no changes made - --enable-logging - Enable client activity to a separate log file - --force - Force the deletion of data when a 'big delete' is detected - --force-http-11 - Force the use of HTTP 1.1 for all operations - --force-sync - Force a synchronization of a specific folder, only when using --single-directory and ignoring all non-default skip_dir and skip_file rules - --get-O365-drive-id ARG - Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library - --get-file-link ARG - Display the file link of a synced file - --help -h - This help information. - --list-shared-folders - List OneDrive Business Shared Folders - --local-first - Synchronize from the local directory source first, before downloading changes from OneDrive. - --log-dir ARG - Directory where logging output is saved to, needs to end with a slash. 
- --logout - Logout the current user - --min-notify-changes ARG - Minimum number of pending incoming changes necessary to trigger a desktop notification - --modified-by ARG - Display the last modified by details of a given path - --monitor -m - Keep monitoring for local and remote changes - --monitor-fullscan-frequency ARG - Number of sync runs before performing a full local scan of the synced directory - --monitor-interval ARG - Number of seconds by which each sync operation is undertaken when idle under monitor mode. - --monitor-log-frequency ARG - Frequency of logging in monitor mode - --no-remote-delete - Do not delete local file 'deletes' from OneDrive when using --upload-only - --operation-timeout ARG - Maximum amount of time (in seconds) an operation is allowed to take - --print-token - Print the access token, useful for debugging - --reauth - Reauthenticate the client with OneDrive - --remove-directory ARG - Remove a directory on OneDrive - no sync will be performed. - --remove-source-files - Remove source file after successful transfer to OneDrive when using --upload-only - --resync - Forget the last saved state, perform a full sync - --resync-auth - Approve the use of performing a --resync action - --single-directory ARG - Specify a single local directory within the OneDrive root to sync. - --skip-dir ARG - Skip any directories that match this pattern from syncing - --skip-dir-strict-match - When matching skip_dir directories, only match explicit matches - --skip-dot-files - Skip dot files and folders from syncing - --skip-file ARG - Skip any files that match this pattern from syncing - --skip-size ARG - Skip new files larger than this size (in MB) - --skip-symlinks - Skip syncing of symlinks - --source-directory ARG - Source directory to rename or move on OneDrive - no sync will be performed. 
- --space-reservation ARG - The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation - --sync-root-files - Sync all files in sync_dir root when using sync_list. - --sync-shared-folders - Sync OneDrive Business Shared Folders - --syncdir ARG - Specify the local directory used for synchronization to OneDrive - --synchronize - Perform a synchronization - --upload-only - Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive. - --user-agent ARG - Specify a User Agent string to the http client - --verbose -v+ - Print more details, useful for debugging (repeat for extra debugging) - --version - Print the version and exit - --with-editing-perms - Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link -``` diff --git a/docs/application-config-options.md b/docs/application-config-options.md new file mode 100644 index 000000000..00c30d47b --- /dev/null +++ b/docs/application-config-options.md @@ -0,0 +1,1013 @@ +# Application Configuration Options for the OneDrive Client for Linux +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. 
+ +## Table of Contents + +- [Configuration File Options](#configuration-file-options) + - [application_id](#application_id) + - [azure_ad_endpoint](#azure_ad_endpoint) + - [azure_tenant_id](#azure_tenant_id) + - [bypass_data_preservation](#bypass_data_preservation) + - [check_nomount](#check_nomount) + - [check_nosync](#check_nosync) + - [classify_as_big_delete](#classify_as_big_delete) + - [cleanup_local_files](#cleanup_local_files) + - [connect_timeout](#connect_timeout) + - [data_timeout](#data_timeout) + - [debug_https](#debug_https) + - [disable_download_validation](#disable_download_validation) + - [disable_notifications](#disable_notifications) + - [disable_upload_validation](#disable_upload_validation) + - [display_running_config](#display_running_config) + - [dns_timeout](#dns_timeout) + - [download_only](#download_only) + - [drive_id](#drive_id) + - [dry_run](#dry_run) + - [enable_logging](#enable_logging) + - [force_http_11](#force_http_11) + - [ip_protocol_version](#ip_protocol_version) + - [local_first](#local_first) + - [log_dir](#log_dir) + - [monitor_fullscan_frequency](#monitor_fullscan_frequency) + - [monitor_interval](#monitor_interval) + - [monitor_log_frequency](#monitor_log_frequency) + - [no_remote_delete](#no_remote_delete) + - [operation_timeout](#operation_timeout) + - [rate_limit](#rate_limit) + - [read_only_auth_scope](#read_only_auth_scope) + - [remove_source_files](#remove_source_files) + - [resync](#resync) + - [resync_auth](#resync_auth) + - [skip_dir](#skip_dir) + - [skip_dir_strict_match](#skip_dir_strict_match) + - [skip_dotfiles](#skip_dotfiles) + - [skip_file](#skip_file) + - [skip_size](#skip_size) + - [skip_symlinks](#skip_symlinks) + - [space_reservation](#space_reservation) + - [sync_business_shared_items](#sync_business_shared_items) + - [sync_dir](#sync_dir) + - [sync_dir_permissions](#sync_dir_permissions) + - [sync_file_permissions](#sync_file_permissions) + - [sync_root_files](#sync_root_files) + - 
[upload_only](#upload_only) + - [user_agent](#user_agent) + - [webhook_enabled](#webhook_enabled) + - [webhook_expiration_interval](#webhook_expiration_interval) + - [webhook_listening_host](#webhook_listening_host) + - [webhook_listening_port](#webhook_listening_port) + - [webhook_public_url](#webhook_public_url) + - [webhook_renewal_interval](#webhook_renewal_interval) +- [Command Line Interface (CLI) Only Options](#command-line-interface-cli-only-options) + - [CLI Option: --auth-files](#cli-option---auth-files) + - [CLI Option: --auth-response](#cli-option---auth-response) + - [CLI Option: --confdir](#cli-option---confdir) + - [CLI Option: --create-directory](#cli-option---create-directory) + - [CLI Option: --create-share-link](#cli-option---create-share-link) + - [CLI Option: --destination-directory](#cli-option---destination-directory) + - [CLI Option: --display-config](#cli-option---display-config) + - [CLI Option: --display-sync-status](#cli-option---display-sync-status) + - [CLI Option: --force](#cli-option---force) + - [CLI Option: --force-sync](#cli-option---force-sync) + - [CLI Option: --get-file-link](#cli-option---get-file-link) + - [CLI Option: --get-sharepoint-drive-id](#cli-option---get-sharepoint-drive-id) + - [CLI Option: --logout](#cli-option---logout) + - [CLI Option: --modified-by](#cli-option---modified-by) + - [CLI Option: --monitor | -m](#cli-option---monitor--m) + - [CLI Option: --print-access-token](#cli-option---print-access-token) + - [CLI Option: --reauth](#cli-option---reauth) + - [CLI Option: --remove-directory](#cli-option---remove-directory) + - [CLI Option: --single-directory](#cli-option---single-directory) + - [CLI Option: --source-directory](#cli-option---source-directory) + - [CLI Option: --sync | -s](#cli-option---sync--s) + - [CLI Option: --verbose | -v+](#cli-option---verbose--v) + - [CLI Option: --with-editing-perms](#cli-option---with-editing-perms) +- [Depreciated Configuration File and CLI 
Options](#depreciated-configuration-file-and-cli-options)
+  - [min_notify_changes](#min_notify_changes)
+  - [CLI Option: --synchronize](#cli-option---synchronize)
+
+
+## Configuration File Options
+
+### application_id
+_**Description:**_ This is the config option for the application id that is used to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ d50ca740-c83f-4d1b-b616-12c519384f0c
+
+_**Config Example:**_ `application_id = "d50ca740-c83f-4d1b-b616-12c519384f0c"`
+
+### azure_ad_endpoint
+_**Description:**_ This is the config option to change the Microsoft Azure Authentication Endpoint that the client uses to conform with data and security requirements that require data to reside within the geographic borders of that country.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *Empty* - not required for normal operation
+
+_**Valid Values:**_ USL4, USL5, DE, CN
+
+_**Config Example:**_ `azure_ad_endpoint = "DE"`
+
+### azure_tenant_id
+_**Description:**_ This config option allows the locking of the client to a specific single tenant and will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". The tenant id may be the GUID Directory ID or the fully qualified tenant name.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *Empty* - not required for normal operation
+
+_**Config Example:**_ `azure_tenant_id = "example.onmicrosoft.us"` or `azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc"`
+
+_**Additional Usage Requirement:**_ Must be configured if 'azure_ad_endpoint' is configured.
+ +### bypass_data_preservation +_**Description:**_ This config option allows the disabling of preserving local data by renaming the local file in the event of data conflict. If this is enabled, you will experience data loss on your local data as the local file will be over-written with data from OneDrive online. Use with care and caution. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `bypass_data_preservation = "false"` or `bypass_data_preservation = "true"` + +### check_nomount +_**Description:**_ This config option is useful to prevent application startup & ongoing use in 'Monitor Mode' if the configured 'sync_dir' is a separate disk that is being mounted by your system. This option will check for the presence of a `.nosync` file in your mount point, and if present, abort any sync process to preserve data. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `check_nomount = "false"` or `check_nomount = "true"` + +_**CLI Option:**_ `--check-for-nomount` + +_**Additional Usage Requirement:**_ Create a `.nosync` file in your mount point *before* you mount your disk so that this is visible, in your mount point if your disk is unmounted. + +### check_nosync +_**Description:**_ This config option is useful to prevent the sync of a *local* directory to Microsoft OneDrive. It will *not* check for this file online to prevent the download of directories to your local system. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `check_nosync = "false"` or `check_nosync = "true"` + +_**CLI Option Use:**_ `--check-for-nosync` + +_**Additional Usage Requirement:**_ Create a `.nosync` file in any *local* directory that you wish to not sync to Microsoft OneDrive when you enable this option. 
+
+### classify_as_big_delete
+_**Description:**_ This config option defines the number of children in a path that is locally removed which will be classified as a 'big data delete' to safeguard large data removals - which are typically accidental local delete events.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 1000
+
+_**Config Example:**_ `classify_as_big_delete = "2000"`
+
+_**CLI Option Use:**_ `--classify-as-big-delete 2000`
+
+_**Additional Usage Requirement:**_ If this option is triggered, you will need to add `--force` to force a sync to occur.
+
+### cleanup_local_files
+_**Description:**_ This config option provides the capability to cleanup local files and folders if they are removed online.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `cleanup_local_files = "false"` or `cleanup_local_files = "true"`
+
+_**CLI Option Use:**_ `--cleanup-local-files`
+
+_**Additional Usage Requirement:**_ This configuration option can only be used with 'download_only'. It cannot be used with any other application option.
+
+### connect_timeout
+_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 30
+
+_**Config Example:**_ `connect_timeout = "20"`
+
+### data_timeout
+_**Description:**_ This setting controls the timeout duration, in seconds, for when data is not received on an active connection to Microsoft OneDrive over HTTPS when using the curl library, before that connection is timed out.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 240
+
+_**Config Example:**_ `data_timeout = "300"`
+
+### debug_https
+_**Description:**_ This setting controls whether the curl library is configured to output additional data to assist with diagnosing HTTPS issues and problems.
+ +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `debug_https = "false"` or `debug_https = "true"` + +_**CLI Option Use:**_ `--debug-https` + +_**Additional Usage Notes:**_ Whilst this option can be used at any time, it is advisable that you only use this option when advised as this will output your `Authorization: bearer` - which is your authentication token to Microsoft OneDrive. + +### disable_download_validation +_**Description:**_ This option determines whether the client will conduct integrity validation on files downloaded from Microsoft OneDrive. Sometimes, when downloading files, particularly from SharePoint, there is a discrepancy between the file size reported by the OneDrive API and the byte count received from the SharePoint HTTP Server for the same file. Enable this option to disable the integrity checks performed by this client. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_download_validation = "false"` or `disable_download_validation = "true"` + +_**CLI Option Use:**_ `--disable-download-validation` + +_**Additional Usage Notes:**_ If you're downloading data from SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. + +### disable_notifications +_**Description:**_ This setting controls whether GUI notifications are sent from the client to your display manager session. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_notifications = "false"` or `disable_notifications = "true"` + +_**CLI Option Use:**_ `--disable-notifications` + +### disable_upload_validation +_**Description:**_ This option determines whether the client will conduct integrity validation on files uploaded to Microsoft OneDrive. 
Sometimes, when uploading files, particularly to SharePoint, SharePoint will modify your file post upload by adding new data to your file which breaks the integrity checking of the upload performed by this client. Enable this option to disable the integrity checks performed by this client.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `disable_upload_validation = "false"` or `disable_upload_validation = "true"`
+
+_**CLI Option Use:**_ `--disable-upload-validation`
+
+_**Additional Usage Notes:**_ If you're uploading data to SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack.
+
+### display_running_config
+_**Description:**_ This option will include the running config of the application at application startup. This may be desirable to enable when running in containerised environments so that any application logging that is occurring, will have the application configuration being consumed at startup, written out to any applicable log file.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `display_running_config = "false"` or `display_running_config = "true"`
+
+_**CLI Option Use:**_ `--display-running-config`
+
+### dns_timeout
+_**Description:**_ This setting controls the libcurl DNS cache value. By default, libcurl caches this info for 60 seconds. This libcurl DNS cache timeout is entirely speculative that a name resolves to the same address for a small amount of time into the future as libcurl does not use DNS TTL properties. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 60
+
+_**Config Example:**_ `dns_timeout = "90"`
+
+### download_only
+_**Description:**_ This setting forces the client to only download data from Microsoft OneDrive and replicate that data locally. No changes made locally will be uploaded to Microsoft OneDrive when using this option.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `download_only = "false"` or `download_only = "true"`
+
+_**CLI Option Use:**_ `--download-only`
+
+### drive_id
+_**Description:**_ This setting controls the specific drive identifier the client will use when syncing with Microsoft OneDrive.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *None*
+
+_**Config Example:**_ `drive_id = "b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB"`
+
+_**Additional Usage Notes:**_ This option is typically only used when configuring the client to sync a specific SharePoint Library. If this configuration option is specified in your config file, a value must be specified otherwise the application will exit citing a fatal error has occurred.
+
+### dry_run
+_**Description:**_ This setting controls the application capability to test your application configuration without actually performing any actual activity (download, upload, move, delete, folder creation).
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `dry_run = "false"` or `dry_run = "true"`
+
+_**CLI Option Use:**_ `--dry-run`
+
+### enable_logging
+_**Description:**_ This setting controls the application logging all actions to a separate file.
By default, all log files will be written to `/var/log/onedrive`, however this can be changed by using the 'log_dir' config option
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `enable_logging = "false"` or `enable_logging = "true"`
+
+_**CLI Option Use:**_ `--enable-logging`
+
+_**Additional Usage Notes:**_ Additional configuration is potentially required to configure the default log directory. Refer to usage.md for details (ADD LINK)
+
+### force_http_11
+_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use libcurl defaults for which HTTP protocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `force_http_11 = "false"` or `force_http_11 = "true"`
+
+_**CLI Option Use:**_ `--force-http-11`
+
+### ip_protocol_version
+_**Description:**_ This setting controls the application IP protocol that should be used when communicating with Microsoft OneDrive. The default is to use IPv4 and IPv6 networks for communicating to Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 0
+
+_**Valid Values:**_ 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
+
+_**Config Example:**_ `ip_protocol_version = "0"` or `ip_protocol_version = "1"` or `ip_protocol_version = "2"`
+
+_**Additional Usage Notes:**_ In some environments where IPv4 and IPv6 are configured at the same time, this causes resolution and routing issues to Microsoft OneDrive. If this is the case, it is advisable to change 'ip_protocol_version' to match your environment.
+
+### local_first
+_**Description:**_ This setting controls what the application considers the 'source of truth' for your data. By default, what is stored online will be considered as the 'source of truth' when syncing to your local machine.
When using this option, your local data will be considered the 'source of truth'. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `local_first = "false"` or `local_first = "true"` + +_**CLI Option Use:**_ `--local-first` + +### log_dir +_**Description:**_ This setting controls the custom application log path when 'enable_logging' has been enabled. By default, all log files will be written to `/var/log/onedrive`. + +_**Value Type:**_ String + +_**Default Value:**_ *None* + +_**Config Example:**_ `log_dir = "~/logs/"` + +_**CLI Option Use:**_ `--log-dir "~/logs/"` + +### monitor_fullscan_frequency +_**Description:**_ This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency. + +_**Value Type:**_ Integer + +_**Default Value:**_ 12 + +_**Config Example:**_ `monitor_fullscan_frequency = "24"` + +_**CLI Option Use:**_ `--monitor-fullscan-frequency '24'` + +_**Additional Usage Notes:**_ By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This setting is only applicable when running in `--monitor` mode. Setting this configuration option to '0' will *disable* the full scan of your data online. + +### monitor_interval +_**Description:**_ This configuration setting determines how often the synchronisation loops run in --monitor mode, measured in seconds. When this time period elapses, the client will check for online changes in Microsoft OneDrive, conduct integrity checks on local data and scan the local 'sync_dir' to identify any new content that hasn't been uploaded yet. 
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 300
+
+_**Config Example:**_ `monitor_interval = "600"`
+
+_**CLI Option Use:**_ `--monitor-interval '600'`
+
+_**Additional Usage Notes:**_ A minimum value of 300 is enforced for this configuration setting.
+
+### monitor_log_frequency
+_**Description:**_ This configuration option controls the suppression of frequently printed log items to the system console when using `--monitor` mode. The aim of this configuration item is to reduce the log output when near zero sync activity is occurring.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 12
+
+_**Config Example:**_ `monitor_log_frequency = "24"`
+
+_**CLI Option Use:**_ `--monitor-log-frequency '24'`
+
+_**Additional Usage Notes:**_
+
+By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and has performed all the initial processing steps:
+```text
+Reading configuration file: /home/user/.config/onedrive/config
+Configuration file successfully loaded
+Configuring Global Azure AD Endpoints
+Sync Engine Initialised with new Onedrive API instance
+All application operations will be performed in: /home/user/OneDrive
+OneDrive synchronisation interval (seconds): 300
+Initialising filesystem inotify monitoring ...
+Performing initial syncronisation to ensure consistent local state ...
+Starting a sync with Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
+Processing changes and items received from Microsoft OneDrive ...
+Performing a database consistency and integrity check on locally stored data ...
+Scanning the local file system '~/OneDrive' for new data to upload ...
+Performing a final true-up scan of online data from Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
+Processing changes and items received from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+Then, based on 'monitor_log_frequency', the following output will be logged until the suppression loop value is reached:
+```text
+Starting a sync with Microsoft OneDrive
+Syncing changes from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occurring which is triggered by 'monitor_fullscan_frequency'.
+
+**Note:** If verbose application output is being used (`--verbose`), then this configuration setting has zero effect, as application verbose output takes priority over application output suppression.
+
+### no_remote_delete
+_**Description:**_ This configuration option controls whether local file and folder deletes are actioned on Microsoft OneDrive.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `no_remote_delete = "false"` or `no_remote_delete = "true"`
+
+_**CLI Option Use:**_ `--no-remote-delete`
+
+_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only`
+
+### operation_timeout
+_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 3600
+
+_**Config Example:**_ `operation_timeout = "3600"`
+
+### rate_limit
+_**Description:**_ This configuration option controls the bandwidth used by the application, per thread, when interacting with Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 0 (unlimited, use available bandwidth per thread)
+
+_**Valid Values:**_ Valid tested values for this configuration option are as follows:
+
+* 131072 = 128 KB/s - absolute minimum for basic application operations to prevent timeouts
+* 262144 = 256 KB/s
+* 524288 = 512 KB/s
+* 1048576 = 1 MB/s
+* 10485760 = 10 MB/s
+* 104857600 = 100 MB/s
+
+_**Config Example:**_ `rate_limit = "131072"`
+
+### read_only_auth_scope
+_**Description:**_ This configuration option controls whether the OneDrive Client for Linux operates in a totally read-only mode.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `read_only_auth_scope = "false"` or `read_only_auth_scope = "true"`
+
+_**Additional Usage Notes:**_ When using 'read_only_auth_scope' you also will need to remove your existing application access consent otherwise old authentication consent will be valid and will be used. This will mean the application will technically have the consent to upload data until you revoke this consent.
+
+### remove_source_files
+_**Description:**_ This configuration option controls whether the OneDrive Client for Linux removes the local file post successful transfer to Microsoft OneDrive.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `remove_source_files = "false"` or `remove_source_files = "true"`
+
+_**CLI Option Use:**_ `--remove-source-files`
+
+_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only`
+
+### resync
+_**Description:**_ This configuration option controls whether the known local sync state with Microsoft OneDrive is removed at application startup. When this option is used, a full scan of your data online is performed to ensure that the local sync state is correctly built back up.
+ +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `resync = "false"` or `resync = "true"` + +_**CLI Option Use:**_ `--resync` + +_**Additional Usage Notes:**_ It's highly recommended to use this option only if the application prompts you to do so. Don't blindly use this option as a default option. If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: +* drive_id +* sync_dir +* skip_file +* skip_dir +* skip_dotfiles +* skip_symlinks +* sync_business_shared_items +* Creating, Modifying or Deleting the 'sync_list' file + +### resync_auth +_**Description:**_ This configuration option controls the approval of performing a 'resync' which can be beneficial in automated environments. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `resync_auth = "false"` or `resync_auth = "true"` + +_**CLI Option Use:**_ `--resync-auth` + +_**Additional Usage Notes:**_ In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' resync requirement, this option allows you to automatically acknowledge the resync prompt. + +### skip_dir +_**Description:**_ This configuration option controls whether the application skips certain directories from being synced. + +_**Value Type:**_ String + +_**Default Value:**_ *Empty* - not required for normal operation + +_**Config Example:**_ + +Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. Entries for 'skip_dir' are *relative* to your 'sync_dir' path. +```text +# When changing a config option below, remove the '#' from the start of the line +# For explanations of all config options below see docs/USAGE.md or the man page. 
+# +# sync_dir = "~/OneDrive" +# skip_file = "~*|.~*|*.tmp" +# monitor_interval = "300" +skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell" +# log_dir = "/var/log/onedrive/" +``` + +The 'skip_dir' option can be specified multiple times within your config file, for example: +```text +skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir" +skip_dir = "/Path/To/A/Directory" +skip_dir = "/Another/Path/To/Different/Directory" +``` + +This will be interpreted the same as: +```text +skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir|/Path/To/A/Directory|/Another/Path/To/Different/Directory" +``` + +_**CLI Option Use:**_ `--skip-dir 'SomeDir|OtherDir|ThisDir|ThatDir|/Path/To/A/Directory|/Another/Path/To/Different/Directory'` + +_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If using the config file and CLI option is used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync. + +### skip_dir_strict_match +_**Description:**_ This configuration option controls whether the application performs strict directory matching when checking 'skip_dir' items. When enabled, the 'skip_dir' item must be a full path match to the path to be skipped. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `skip_dir_strict_match = "false"` or `skip_dir_strict_match = "true"` + +_**CLI Option Use:**_ `--skip-dir-strict-match` + +### skip_dotfiles +_**Description:**_ This configuration option controls whether the application will skip all .files and .folders when performing sync operations. 
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_dotfiles = "false"` or `skip_dotfiles = "true"`
+
+_**CLI Option Use:**_ `--skip-dot-files`
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+### skip_file
+_**Description:**_ This configuration option controls whether the application skips certain files from being synced.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `~*|.~*|*.tmp|*.swp|*.partial`
+
+_**Config Example:**_
+
+Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
+
+By default, the following files will be skipped:
+* Files that start with ~
+* Files that start with .~ (like .~lock.* files generated by LibreOffice)
+* Files that end in .tmp, .swp and .partial
+
+Files can be skipped in the following fashion:
+* Specify a wildcard, eg: '*.txt' (skip all txt files)
+* Explicitly specify the filename and its full path relative to your sync_dir, eg: '/path/to/file/filename.ext'
+* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext'
+
+```text
+# When changing a config option below, remove the '#' from the start of the line
+# For explanations of all config options below see docs/USAGE.md or the man page.
+# +# sync_dir = "~/OneDrive" +skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx" +# monitor_interval = "300" +# skip_dir = "" +# log_dir = "/var/log/onedrive/" +``` +The 'skip_file' option can be specified multiple times within your config file, for example: +```text +skip_file = "~*|.~*|*.tmp|*.swp" +skip_file = "*.blah" +skip_file = "never_sync.file" +skip_file = "/Documents/keepass.kdbx" +``` +This will be interpreted the same as: +```text +skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx" +``` + +_**CLI Option Use:**_ `--skip-file '~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx'` + +_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If using the config file and CLI option is used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync. + +### skip_size +_**Description:**_ This configuration option controls whether the application skips syncing certain files larger than the specified size. The value specified is in MB. + +_**Value Type:**_ Integer + +_**Default Value:**_ 0 (all files, regardless of size, are synced) + +_**Config Example:**_ `skip_size = "50"` + +_**CLI Option Use:**_ `--skip-size '50'` + +### skip_symlinks +_**Description:**_ This configuration option controls whether the application will skip all symbolic links when performing sync operations. Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. 
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_symlinks = "false"` or `skip_symlinks = "true"`
+
+_**CLI Option Use:**_ `--skip-symlinks`
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+### space_reservation
+_**Description:**_ This configuration option controls how much local disk space should be reserved, to prevent the application from filling up your entire disk due to misconfiguration
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 50 MB (expressed as Bytes when using `--display-config`)
+
+_**Config Example:**_ `space_reservation = "100"`
+
+_**CLI Option Use:**_ `--space-reservation '100'`
+
+### sync_business_shared_items
+_**Description:**_ This configuration option controls whether OneDrive Business | Office 365 Shared Folders, when added as a 'shortcut' to your 'My Files' will be synced to your local system.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `sync_business_shared_items = "false"` or `sync_business_shared_items = "true"`
+
+_**CLI Option Use:**_ *none* - this is a config file option only
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+### sync_dir
+_**Description:**_ This configuration option determines the location on your local filesystem where your data from Microsoft OneDrive will be saved.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `~/OneDrive`
+
+_**Config Example:**_ `sync_dir = "~/MyDirToSync"`
+
+_**CLI Option Use:**_ `--syncdir '~/MyDirToSync'`
+
+_**Additional Usage Notes:**_ After changing this option, you will be required to perform a resync.
+
+### sync_dir_permissions
+_**Description:**_ This configuration option defines the directory permissions applied when a new directory is created locally during the process of syncing your data from Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ `700` - This provides the following permissions: `drwx------`
+
+_**Config Example:**_ `sync_dir_permissions = "700"`
+
+_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value.
+
+### sync_file_permissions
+_**Description:**_ This configuration option defines the file permissions applied when a new file is created locally during the process of syncing your data from Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ `600` - This provides the following permissions: `-rw-------`
+
+_**Config Example:**_ `sync_file_permissions = "600"`
+
+_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing file permissions if you modify this value.
+
+### sync_root_files
+_**Description:**_ This configuration option manages the synchronisation of files located in the 'sync_dir' root when using a 'sync_list.' It enables you to sync all these files by default, eliminating the need to repeatedly modify your 'sync_list' and initiate resynchronisation.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `sync_root_files = "false"` or `sync_root_files = "true"`
+
+_**CLI Option Use:**_ `--sync-root-files`
+
+_**Additional Usage Notes:**_ Although it's not mandatory, it's recommended that after enabling this option, you perform a `--resync`. This ensures that any previously excluded content is now included in your sync process.
+
+### upload_only
+_**Description:**_ This setting forces the client to only upload data to Microsoft OneDrive and replicate the local state online. By default, this will also remove content online, that has been removed locally.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `upload_only = "false"` or `upload_only = "true"`
+
+_**CLI Option Use:**_ `--upload-only`
+
+_**Additional Usage Notes:**_ To ensure that data deleted locally remains accessible online, you can use the 'no_remote_delete' option. If you want to delete the data from your local storage after a successful upload to Microsoft OneDrive, you can use the 'remove_source_files' option.
+
+### user_agent
+_**Description:**_ This configuration option controls the 'User-Agent' request header that is presented to Microsoft Graph API when accessing the Microsoft OneDrive service. This string lets servers and network peers identify the application, operating system, vendor, and/or version of the application making the request. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `ISV|abraunegg|OneDrive Client for Linux/vX.Y.Z-A-bcdefghi`
+
+_**Config Example:**_ `user_agent = "ISV|CompanyName|AppName/Version"`
+
+_**Additional Usage Notes:**_ The current value conforms to the Microsoft Graph API documentation for presenting an appropriate 'User-Agent' header and aligns to the registered 'application_id' that this application uses.
+ +### webhook_enabled +_**Description:**_ + +_**Value Type:**_ + +_**Default Value:**_ + +_**Config Example:**_ + +### webhook_expiration_interval +_**Description:**_ + +_**Value Type:**_ + +_**Default Value:**_ + +_**Config Example:**_ + +### webhook_listening_host +_**Description:**_ + +_**Value Type:**_ + +_**Default Value:**_ + +_**Config Example:**_ + +### webhook_listening_port +_**Description:**_ + +_**Value Type:**_ + +_**Default Value:**_ + +_**Config Example:**_ + +### webhook_public_url +_**Description:**_ + +_**Value Type:**_ + +_**Default Value:**_ + +_**Config Example:**_ + +### webhook_renewal_interval +_**Description:**_ + +_**Value Type:**_ + +_**Default Value:**_ + +_**Config Example:**_ + + + +## Command Line Interface (CLI) Only Options + +### CLI Option: --auth-files +_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via specific files that the application uses to read the authentication data from. + +_**Usage Example:**_ `onedrive --auth-files authUrl:responseUrl` + +_**Additional Usage Notes:**_ The authorisation URL is written to the specified 'authUrl' file, then onedrive waits for the file 'responseUrl' to be present, and reads the authentication response from that file. Example: + +```text +onedrive --auth-files '~/onedrive-auth-url:~/onedrive-response-url' +Reading configuration file: /home/alex/.config/onedrive/config +Configuration file successfully loaded +Configuring Global Azure AD Endpoints +Client requires authentication before proceeding. Waiting for --auth-files elements to be available. +``` +At this point, the client has written the file `~/onedrive-auth-url` which contains the authentication URL that needs to be visited to perform the authentication process. The client will now wait and watch for the presence of the file `~/onedrive-response-url`. 
+
+Visit the authentication URL, and then create a new file called `~/onedrive-response-url` with the response URI. Once this has been done, the application will acknowledge the presence of this file, read the contents, and authenticate the application.
+```text
+Sync Engine Initialised with new Onedrive API instance
+
+ --sync or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance.
+
+No OneDrive sync will be performed without one of these two arguments being present.
+```
+
+### CLI Option: --auth-response
+_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via providing the authentication response URI directly.
+
+_**Usage Example:**_ `onedrive --auth-response https://login.microsoftonline.com/common/oauth2/nativeclient?code=`
+
+_**Additional Usage Notes:**_ Typically, unless the application client identifier, authentication scopes are being modified or a specific Azure Tenant is being specified, the authentication URL will most likely be as follows:
+```text
+https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
+```
+With this URL being known, it is possible ahead of time to request an authentication token by visiting this URL, and performing the authentication access request.
+
+### CLI Option: --confdir
+_**Description:**_ This CLI option allows the user to specify where all the application configuration and relevant components are stored.
+
+_**Usage Example:**_ `onedrive --confdir '~/.config/onedrive-business/'`
+
+_**Additional Usage Notes:**_ If using this option, it must be specified each and every time the application is used.
If this is omitted, the application default configuration directory will be used.
+
+### CLI Option: --create-directory
+_**Description:**_ This CLI option allows the user to create the specified directory path on Microsoft OneDrive without performing a sync.
+
+_**Usage Example:**_ `onedrive --create-directory 'path/of/new/folder/structure/to/create/'`
+
+_**Additional Usage Notes:**_ The specified path to create is relative to your configured 'sync_dir'.
+
+### CLI Option: --create-share-link
+_**Description:**_ This CLI option enables the creation of a shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive. By default, the permissions for the file will be 'read-only'.
+
+_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ If writable access to the file is required, you must add `--with-editing-perms` to your command. See below for details.
+
+### CLI Option: --destination-directory
+_**Description:**_ This CLI option specifies the 'destination' portion of moving a file or folder online, without performing a sync operation.
+
+_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'`
+
+_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'.
+
+### CLI Option: --display-config
+_**Description:**_ This CLI option will display the effective application configuration
+
+_**Usage Example:**_ `onedrive --display-config`
+
+### CLI Option: --display-sync-status
+_**Description:**_ This CLI option will display the sync status of the configured 'sync_dir'
+
+_**Usage Example:**_ `onedrive --display-sync-status`
+
+_**Additional Usage Notes:**_ This option can also use the `--single-directory` option to determine the sync status of a specific directory within the configured 'sync_dir'
+
+### CLI Option: --force
+_**Description:**_ This CLI option enables the force the deletion of data when a 'big delete' is detected.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force`
+
+_**Additional Usage Notes:**_ This option should only be used exclusively in cases where you've initiated a 'big delete' and genuinely intend to remove all the data that is set to be deleted online.
+
+### CLI Option: --force-sync
+_**Description:**_ This CLI option enables the syncing of a specific directory, using the Client Side Filtering application defaults, overriding any user application configuration.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force-sync --single-directory 'Data'`
+
+_**Additional Usage Notes:**_ When this option is used, you will be presented with the following warning and risk acceptance:
+```text
+WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synch --single-directory --force-sync being used
+
+The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts.
+By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync.
+
+Are you sure you wish to proceed with --force-sync [Y/N] 
+```
+To proceed with this sync task, you must risk accept the actions you are taking.
If you have any concerns, first use `--dry-run` and evaluate the outcome before proceeding with the actual action.
+
+### CLI Option: --get-file-link
+_**Description:**_ This CLI option queries the OneDrive API and returns the WebURL for the given local file.
+
+_**Usage Example:**_ `onedrive --get-file-link 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir'
+
+### CLI Option: --get-sharepoint-drive-id
+_**Description:**_ This CLI option queries the OneDrive API and returns the Office 365 Drive ID for a given Office 365 SharePoint Shared Library that can then be used with 'drive_id' to sync a specific SharePoint Library.
+
+_**Usage Example:**_ `onedrive --get-sharepoint-drive-id '*'` or `onedrive --get-sharepoint-drive-id 'PointPublishing Hub Site'`
+
+### CLI Option: --logout
+_**Description:**_ This CLI option removes this client's authentication status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --logout`
+
+### CLI Option: --modified-by
+_**Description:**_ This CLI option queries the OneDrive API and returns the last modified details for the given local file.
+
+_**Usage Example:**_ `onedrive --modified-by 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir'
+
+### CLI Option: --monitor | -m
+_**Description:**_ This CLI option controls the 'Monitor Mode' operational aspect of the client. When this option is used, the client will perform on-going syncs of data between Microsoft OneDrive and your local system. Local changes will be uploaded in near-realtime, whilst online changes will be downloaded on the next sync process. The frequency of these checks is governed by the 'monitor_interval' value.
+ +_**Usage Example:**_ `onedrive --monitor` or `onedrive -m` + +### CLI Option: --print-access-token +_**Description:**_ Print the current access token being used to access Microsoft OneDrive. + +_**Usage Example:**_ `onedrive --verbose --verbose --debug-https --print-access-token` + +_**Additional Usage Notes:**_ Do not use this option if you do not know why you are wanting to use it. Be highly cautious of exposing this object. Change your password if you feel that you have inadvertently exposed this token. + +### CLI Option: --reauth +_**Description:**_ This CLI option controls the ability to re-authenticate your client with Microsoft OneDrive. + +_**Usage Example:**_ `onedrive --reauth` + +### CLI Option: --remove-directory +_**Description:**_ This CLI option allows the user to remove the specified directory path on Microsoft OneDrive without performing a sync. + +_**Usage Example:**_ `onedrive --remove-directory 'path/of/new/folder/structure/to/remove/'` + +_**Additional Usage Notes:**_ The specified path to remove is relative to your configured 'sync_dir'. + +### CLI Option: --single-directory +_**Description:**_ This CLI option controls the application's ability to sync a specific single directory. + +_**Usage Example:**_ `onedrive --sync --single-directory 'Data'` + +_**Additional Usage Notes:**_ The path specified is relative to your configured 'sync_dir' path. If the physical local path 'Folder' to sync is `~/OneDrive/Data/Folder` then the command would be `--single-directory 'Data/Folder'`. + +### CLI Option: --source-directory +_**Description:**_ This CLI option specifies the 'source' portion of moving a file or folder online, without performing a sync operation. + +_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'` + +_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'. 
+ +### CLI Option: --sync | -s +_**Description:**_ This CLI option controls the 'Standalone Mode' operational aspect of the client. When this option is used, the client will perform a one-time sync of data between Microsoft OneDrive and your local system. + +_**Usage Example:**_ `onedrive --sync` or `onedrive -s` + +### CLI Option: --verbose | -v + +_**Description:**_ This CLI option controls the verbosity of the application output. Use the option once, to have normal verbose output, use twice to have debug level application output. + +_**Usage Example:**_ `onedrive --sync --verbose` or `onedrive --monitor --verbose` + +### CLI Option: --with-editing-perms +_**Description:**_ This CLI option enables the creation of a writable shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive. This option can only be used in conjunction with `--create-share-link` + +_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt' --with-editing-perms` + +_**Additional Usage Notes:**_ Placement of `--with-editing-perms` is critical. It *must* be placed after the file path as per the example above. + +## Deprecated Configuration File and CLI Options +The following configuration options are no longer supported + +### min_notify_changes +_**Description:**_ Minimum number of pending incoming changes necessary to trigger a GUI desktop notification. + +_**Deprecated Config Example:**_ `min_notify_changes = "50"` + +_**Deprecated CLI Option:**_ `--min-notify-changes '50'` + +_**Reason for deprecation:**_ Application has been totally re-written. When this item was introduced, it was done so to reduce spamming of all events to the GUI desktop. 
+ +### CLI Option: --synchronize +_**Description:**_ Perform a synchronisation with Microsoft OneDrive + +_**Deprecated CLI Option:**_ `--synchronize` + +_**Reason for deprecation:**_ `--synchronize` has been deprecated in favour of `--sync` or `-s` diff --git a/docs/business-shared-folders.md b/docs/business-shared-folders.md new file mode 100644 index 000000000..4282f4ac6 --- /dev/null +++ b/docs/business-shared-folders.md @@ -0,0 +1,40 @@ +# How to configure OneDrive Business Shared Folder Sync +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Important Note +This feature has been 100% re-written from v2.5.0 onwards. A prerequisite before using this capability in v2.5.0 and above is for you to revert any Shared Business Folder configuration you may be currently using, including, but not limited to: +* Removing `sync_business_shared_folders = "true|false"` from your 'config' file +* Removing the 'business_shared_folders' file +* Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues. + +## Process Overview +Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: +1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you. +2. Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder +3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement. +4. Test the configuration using '--dry-run' +5. 
Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required + + +**NOTE:** This documentation will be updated as this feature progresses. + + +### Enable syncing of OneDrive Business Shared Folders via config file +```text +sync_business_shared_items = "true" +``` + +### Disable syncing of OneDrive Business Shared Folders via config file +```text +sync_business_shared_items = "false" +``` + +## Known Issues +Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. + +Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: + +![shared_with_me](./images/shared_with_me.JPG) + +This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/Docker.md b/docs/docker.md similarity index 97% rename from docs/Docker.md rename to docs/docker.md index 41899082d..d142fef8e 100644 --- a/docs/Docker.md +++ b/docs/docker.md @@ -66,7 +66,7 @@ ROOT level privileges prohibited! ### 3. First run The 'onedrive' client within the Docker container needs to be authorized with your Microsoft account. This is achieved by initially running docker in interactive mode. -Run the docker image with the commands below and make sure to change `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `"/home/abraunegg/OneDrive"`). +Run the docker image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`). ```bash export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" mkdir -p ${ONEDRIVE_DATA_DIR} @@ -168,7 +168,7 @@ docker volume inspect onedrive_conf Or you can map your own config folder to the config volume. 
Make sure to copy all files from the docker volume into your mapped folder first. -The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration) +The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) ### 7. Sync multiple accounts There are many ways to do this, the easiest is probably to @@ -211,7 +211,7 @@ docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/ | ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | | ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | | ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | diff --git a/docs/INSTALL.md b/docs/install.md similarity index 98% rename from docs/INSTALL.md rename to docs/install.md index 3f00ae212..c3e126c36 100644 --- a/docs/INSTALL.md +++ b/docs/install.md @@ -211,8 +211,10 @@ sudo make install ``` ### Build options -Notifications can be enabled using the `configure` switch `--enable-notifications`. +#### GUI Notification Support +GUI notification support can be enabled using the `configure` switch `--enable-notifications`. 
+#### systemd service directory customisation support Systemd service files are installed in the appropriate directories on the system, as provided by `pkg-config systemd` settings. If the need for overriding the deduced path are necessary, the two options `--with-systemdsystemunitdir` (for @@ -220,9 +222,11 @@ the Systemd system unit location), and `--with-systemduserunitdir` (for the Systemd user unit location) can be specified. Passing in `no` to one of these options disabled service file installation. +#### Additional Compiler Debug By passing `--enable-debug` to the `configure` call, `onedrive` gets built with additional debug information, useful (for example) to get `perf`-issued figures. +#### Shell Completion Support By passing `--enable-completions` to the `configure` call, shell completion functions are installed for `bash`, `zsh` and `fish`. The installation directories are determined as far as possible automatically, but can be overridden by passing diff --git a/docs/Podman.md b/docs/podman.md similarity index 97% rename from docs/Podman.md rename to docs/podman.md index 7f3a79d12..62b3af0be 100644 --- a/docs/Podman.md +++ b/docs/podman.md @@ -59,7 +59,7 @@ This will create a podman volume labeled `onedrive_conf`, where all configuratio ### 2. First run The 'onedrive' client within the container needs to be authorized with your Microsoft account. This is achieved by initially running podman in interactive mode. -Run the podman image with the commands below and make sure to change `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `"/home/abraunegg/OneDrive"`). +Run the podman image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`). 
It is a requirement that the container be run using a non-root uid and gid, you must insert a non-root UID and GID (e.g.` export ONEDRIVE_UID=1000` and export `ONEDRIVE_GID=1000`). @@ -188,7 +188,7 @@ podman volume inspect onedrive_conf ``` Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first. -The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration) +The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) ### 7. Sync multiple accounts There are many ways to do this, the easiest is probably to @@ -219,7 +219,7 @@ podman run -it --restart unless-stopped --name onedrive_work \ | ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | | ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | | ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. 
Default = "" | "mydir" | diff --git a/docs/SharePoint-Shared-Libraries.md b/docs/sharepoint-libraries.md similarity index 100% rename from docs/SharePoint-Shared-Libraries.md rename to docs/sharepoint-libraries.md diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 000000000..24e24f693 --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,900 @@ +# Using the OneDrive Client for Linux +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Table of Contents + +- [Important Notes](#important-notes) + - [Upgrading from the 'skilion' Client](#upgrading-from-the-skilion-client) + - [Naming Conventions for Local Files and Folders](#naming-conventions-for-local-files-and-folders) + - [Compatibility with curl](#compatibility-with-curl) +- [First Steps](#first-steps) + - [Authorise the Application with Your Microsoft OneDrive Account](#authorise-the-application-with-your-microsoft-onedrive-account) + - [Display Your Applicable Runtime Configuration](#display-your-applicable-runtime-configuration) + - [Understanding OneDrive Client for Linux Operational Modes](#understanding-onedrive-client-for-linux-operational-modes) + - [Standalone Synchronisation Operational Mode (Standalone Mode)](#standalone-synchronisation-operational-mode-standalone-mode) + - [Ongoing Synchronisation Operational Mode (Monitor Mode)](#ongoing-synchronisation-operational-mode-monitor-mode) + - [Increasing application logging level](#increasing-application-logging-level) + - [Testing your configuration](#testing-your-configuration) + - [Performing a sync with Microsoft OneDrive](#performing-a-sync-with-microsoft-onedrive) + - [Performing a single directory synchronisation with Microsoft 
OneDrive](#performing-a-single-directory-synchronisation-with-microsoft-onedrive) + - [Performing a 'one-way' download synchronisation with Microsoft OneDrive](#performing-a-one-way-download-synchronisation-with-microsoft-onedrive) + - [Performing a 'one-way' upload synchronisation with Microsoft OneDrive](#performing-a-one-way-upload-synchronisation-with-microsoft-onedrive) + - [Performing a selective synchronisation via 'sync_list' file](#performing-a-selective-synchronisation-via-sync_list-file) + - [Performing a --resync](#performing-a-resync) + - [Performing a --force-sync without a --resync or changing your configuration](#performing-a-force-sync-without-a-resync-or-changing-your-configuration) + - [Enabling the Client Activity Log](#enabling-the-client-activity-log) + - [Client Activity Log Example:](#client-activity-log-example) + - [Client Activity Log Differences](#client-activity-log-differences) + - [GUI Notifications](#gui-notifications) + - [Handling a Microsoft OneDrive Account Password Change](#handling-a-microsoft-onedrive-account-password-change) + - [Determining the synchronisation result](#determining-the-synchronisation-result) +- [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions) + - [How to change the default configuration of the client?](#how-to-change-the-default-configuration-of-the-client) + - [How to change where my data from Microsoft OneDrive is stored?](#how-to-change-where-my-data-from-microsoft-onedrive-is-stored) + - [How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?](#how-to-change-what-file-and-directory-permissions-are-assigned-to-data-that-is-downloaded-from-microsoft-onedrive) + - [How to only sync a specific directory?](#how-to-only-sync-a-specific-directory) + - [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing) + - [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing) + - [How to 
'skip' dot files and folders from syncing?](#how-to-skip-dot-files-and-folders-from-syncing) + - [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing) + - [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations) + - [How can I prevent my local disk from filling up?](#how-can-i-prevent-my-local-disk-from-filling-up) + - [How does the client handle symbolic links?](#how-does-the-client-handle-symbolic-links) + - [How to synchronise shared folders (OneDrive Personal)?](#how-to-synchronise-shared-folders-onedrive-personal) + - [How to synchronise shared folders (OneDrive Business or Office 365)?](#how-to-synchronise-shared-folders-onedrive-business-or-office-365) + - [How to synchronise SharePoint / Office 365 Shared Libraries?](#how-to-synchronise-sharepoint--office-365-shared-libraries) + - [How to Create a Shareable Link?](#how-to-create-a-shareable-link) + - [How to Synchronise Both Personal and Business Accounts at once?](#how-to-synchronise-both-personal-and-business-accounts-at-once) + - [How to Synchronise Multiple SharePoint Libraries simultaneously?](#how-to-synchronise-multiple-sharepoint-libraries-simultaneously) + - [How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?](#how-to-receive-real-time-changes-from-microsoft-onedrive-service-instead-of-waiting-for-the-next-sync-period) + - [How to initiate the client as a background service?](#how-to-initiate-the-client-as-a-background-service) + - [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd) + - [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora) + - [OneDrive 
service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux) + - [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions) + - [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora) + - [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void) + - [How to start a user systemd service at boot without user login?](#how-to-start-a-user-systemd-service-at-boot-without-user-login) + +## Important Notes +### Upgrading from the 'skilion' Client +The 'skilion' version has a significant number of issues in how it manages the local sync state. When upgrading from the 'skilion' client to this client, it's recommended to stop any service or OneDrive process that may be running. Once all OneDrive services are stopped, make sure to remove any old client binaries from your system. + +Furthermore, if you're using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as shown below: + +**Invalid 'skilion' configuration:** +```text +skip_file = ".*|~*" +``` +**Minimum valid configuration:** +```text +skip_file = "~*" +``` +**Default valid configuration:** +```text +skip_file = "~*|.~*|*.tmp|*.swp|*.partial" +``` + +Avoid using a 'skip_file' entry of `.*` as it may prevent the correct detection of local changes to process. 
The configuration values for 'skip_file' will be checked for validity, and if there is an issue, the following error message will be displayed: +```text +ERROR: Invalid skip_file entry '.*' detected +``` + +### Naming Conventions for Local Files and Folders +In the synchronisation directory, it's crucial to adhere to the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file) for your files and folders. + +If you happen to have two files with the same names but different capitalisation, our application will make an effort to handle it. However, when there's a clash within the namespace, the file causing the conflict won't be synchronised. Please note that this behaviour is intentional and won't be addressed. + +### Compatibility with curl +If your system uses curl < 7.47.0, curl will default to HTTP/1.1 for HTTPS operations, and the client will follow suit, using HTTP/1.1. + +For systems running curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS, but it will still use HTTP/1.1 as the default for these operations. The client will employ HTTP/1.1 for HTTPS operations as well. + +However, if your system employs curl >= 7.62.0, curl will, by default, prioritise HTTP/2 over HTTP/1.1. In this case, the client will utilise HTTP/2 for most HTTPS operations and stick with HTTP/1.1 for others. Please note that this distinction is governed by the OneDrive platform, not our client. + +If you explicitly want to use HTTP/1.1, you can do so by using the `--force-http-11` flag or setting the configuration option `force_http_11 = "true"`. This will compel the application to exclusively use HTTP/1.1. Otherwise, all client operations will align with the curl default settings for your distribution. + +## First Steps +### Authorise the Application with Your Microsoft OneDrive Account +Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. 
This can be done by simply running the application without any additional command switches. + +Please be aware that some companies may require you to explicitly add this app to the [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department. + +When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application. + +**Example:** +```text +[user@hostname ~]$ onedrive +Authorise this app by visiting: + +https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient + +Enter the response URI from your browser: https://login.microsoftonline.com/common/oauth2/nativeclient?code= + +The application has been successfully authorised, but no additional command switches were provided. + +Please use 'onedrive --help' for further assistance on how to run this application. +``` + +### Display Your Applicable Runtime Configuration +To verify the configuration that the application will use, use the following command: +```text +onedrive --display-config +``` +This command will display all the relevant runtime interpretations of the options and configurations you are using. 
An example output is as follows: +```text +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +onedrive version = vX.Y.Z-A-bcdefghi +Config path = /home/user/.config/onedrive +Config file found in config path = true +Config option 'drive_id' = +Config option 'sync_dir' = ~/OneDrive +... +Config option 'webhook_enabled' = false +``` + +### Understanding OneDrive Client for Linux Operational Modes +There are two modes of operation when using the client: +1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. +2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive. + +#### Standalone Synchronisation Operational Mode (Standalone Mode) +This method of use can be employed by issuing the following option to the client: +```text +onedrive --sync +``` +For simplicity, this can be reduced to the following: +```text +onedrive -s +``` + +#### Ongoing Synchronisation Operational Mode (Monitor Mode) +This method of use can be utilised by issuing the following option to the client: +```text +onedrive --monitor +``` +For simplicity, this can be shortened to the following: +```text +onedrive -m +``` +**Note:** This method of use is typically employed when enabling a systemd service to run the application in the background. + +Two common errors can occur when using monitor mode: +* Initialisation failure +* Unable to add a new inotify watch + +Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low: +* `fs.file-max` +* `fs.inotify.max_user_watches` + +To determine what the existing values are on your system, use the following commands: +```text +sysctl fs.file-max +sysctl fs.inotify.max_user_watches +``` +Alternatively, when running the client with increased verbosity (see below), the client will display what the current configured system maximum values are: +```text +... 
+All application operations will be performed in: /home/user/OneDrive +OneDrive synchronisation interval (seconds): 300 +Maximum allowed open files: 393370 <-- This is the fs.file-max value +Maximum allowed inotify watches: 29374 <-- This is the fs.inotify.max_user_watches value +Initialising filesystem inotify monitoring ... +... +``` +To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir': +```text +cd /path/to/your/sync/dir +ls -laR | wc -l +``` + +To make a change to these variables using your file and folder count, use the following process: +```text +sudo sysctl fs.file-max= +sudo sysctl fs.inotify.max_user_watches= +``` +Once these values are changed, you will need to restart your client so that the new values are detected and used. + +To make these changes permanent on your system, refer to your OS reference documentation. + +### Increasing application logging level +When running a sync (`--sync`) or using monitor mode (`--monitor`), it may be desirable to see additional information regarding the progress and operation of the client. For example, for a `--sync` command, this would be: +```text +onedrive --sync --verbose +``` +Furthermore, for simplicity, this can be simplified to the following: +``` +onedrive -s -v +``` +Adding `--verbose` twice will enable debug logging output. This is generally required when raising a bug report or needing to understand a problem. + +### Testing your configuration +You can test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded, or removed; however, the application will display what 'would' have occurred. For example: +```text +onedrive --sync --verbose --dry-run +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +Using 'user' Config Dir: /home/user/.config/onedrive +DRY-RUN Configured. Output below shows what 'would' have occurred. 
+DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations +DRY RUN: Not creating backup config file as --dry-run has been used +DRY RUN: Not updating hash files as --dry-run has been used +Checking Application Version ... +Attempting to initialise the OneDrive API ... +Configuring Global Azure AD Endpoints +The OneDrive API was initialised successfully +Opening the item database ... +Sync Engine Initialised with new Onedrive API instance +Application version: vX.Y.Z-A-bcdefghi +Account Type: +Default Drive ID: +Default Root ID: +Remaining Free Space: 1058488129 KB +All application operations will be performed in: /home/user/OneDrive +Fetching items from the OneDrive API for Drive ID: .. +... +Performing a database consistency and integrity check on locally stored data ... +Processing DB entries for this Drive ID: +Processing ~/OneDrive +The directory has not changed +... +Scanning local filesystem '~/OneDrive' for new data to upload ... +... +Performing a final true-up scan of online data from Microsoft OneDrive +Fetching items from the OneDrive API for Drive ID: .. + +Sync with Microsoft OneDrive is complete +``` + +### Performing a sync with Microsoft OneDrive +By default, all files are downloaded in `~/OneDrive`. This download location is controlled by the 'sync_dir' config option. + +After authorising the application, a sync of your data can be performed by running: +```text +onedrive --sync +``` +This will synchronise files from your Microsoft OneDrive account to your `~/OneDrive` local directory or to your specified 'sync_dir' location. + +If you prefer to use your local files as stored in `~/OneDrive` as your 'source of truth,' use the following sync command: +```text +onedrive --sync --local-first +``` + +### Performing a single directory synchronisation with Microsoft OneDrive +In some cases, it may be desirable to synchronise a single directory under ~/OneDrive without having to change your client configuration. 
To do this, use the following command: +```text +onedrive --sync --single-directory '' +``` + +**Example:** If the full path is `~/OneDrive/mydir`, the command would be `onedrive --sync --single-directory 'mydir'` + +### Performing a 'one-way' download synchronisation with Microsoft OneDrive +In some cases, it may be desirable to 'download only' from Microsoft OneDrive. To do this, use the following command: +```text +onedrive --sync --download-only +``` +This will download all the content from Microsoft OneDrive to your `~/OneDrive` location. Any files that are deleted online remain locally and will not be removed. + +However, in some circumstances, it may be desirable to clean up local files that have been removed online. To do this, use the following command: + +```text +onedrive --sync --download-only --cleanup-local-files +``` + +### Performing a 'one-way' upload synchronisation with Microsoft OneDrive +In some cases, it may be desirable to 'upload only' to Microsoft OneDrive. To do this, use the following command: +```text +onedrive --sync --upload-only +``` +**Note:** If a file or folder is present on Microsoft OneDrive, which was previously synchronised and now does not exist locally, that item will be removed from Microsoft OneDrive online. If the data on Microsoft OneDrive should be kept, the following should be used: +```text +onedrive --sync --upload-only --no-remote-delete +``` +**Note:** The operation of 'upload only' does not request data from Microsoft OneDrive about what 'other' data exists online. The client only knows about the data that 'this' client uploaded, thus any files or folders created or uploaded outside of this client will remain untouched online. + +### Performing a selective synchronisation via 'sync_list' file +Selective synchronisation allows you to sync only specific files and directories. 
+To enable selective synchronisation, create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`). + +Important points to understand before using 'sync_list'. +* 'sync_list' excludes _everything_ by default on OneDrive. +* 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**. +* Order exclusions before inclusions, so that anything _specifically included_ is included. +* How and where you place your `/` matters for excludes and includes in subdirectories. + +Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations. + +Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns. +Here is an example of `sync_list`: +```text +# sync_list supports comments +# +# The ordering of entries is highly recommended - exclusions before inclusions +# +# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in OneDrive +!Documents/temp* +# +# Exclude secret data folder in root directory only +!/Secret_data/* +# +# Include everything else in root directory +/* +# +# Include my Backup folder(s) or file(s) anywhere on OneDrive +Backup +# +# Include my Backup folder in root +/Backup/ +# +# Include Documents folder(s) anywhere in OneDrive +Documents/ +# +# Include all PDF files in Documents folder(s), anywhere in OneDrive +Documents/*.pdf +# +# Include this single document in Documents folder(s), anywhere in OneDrive +Documents/latest_report.docx +# +# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in OneDrive +Work/Project* +# +# Include all "notes.txt" files, anywhere in OneDrive +notes.txt +# +# Include /Blender in the ~/OneDrive root but not if elsewhere in OneDrive +/Blender +# +# Include these directories (or
files) in 'Pictures' folder(s), that have a space in their name +Pictures/Camera Roll +Pictures/Saved Pictures +# +# Include these names if they match any file or folder +Cinema Soc +Codes +Textbooks +Year 2 +``` +The following are supported for pattern matching and exclusion rules: +* Use the `*` to wildcard select any characters to match for the item to be included +* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item + +**Note:** When enabling the use of 'sync_list,' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement. + +**Note:** After changing the sync_list, you must perform a full re-synchronisation by adding `--resync` to your existing command line - for example: `onedrive --sync --resync` + +**Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file: +```text +sync_root_files = "true" +``` +This will tell the application to sync any file that it finds in your 'sync_dir' root by default, negating the need to constantly update your 'sync_list' file. + +### Performing a --resync +If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: +* drive_id +* sync_dir +* skip_file +* skip_dir +* skip_dotfiles +* skip_symlinks +* sync_business_shared_items +* Creating, Modifying or Deleting the 'sync_list' file + +Additionally, you might opt for a `--resync` if you think it's necessary to ensure your data remains in sync. 
If you're using this switch simply because you're unsure of the sync status, you can check the actual sync status using `--display-sync-status`. + +When you use `--resync`, you'll encounter the following warning and advice: +```text +Using --resync will delete your local 'onedrive' client state, so there won't be a record of your current 'sync status.' +This may potentially overwrite local versions of files with older versions downloaded from OneDrive, leading to local data loss. +If in doubt, back up your local data before using --resync. + +Are you sure you want to proceed with --resync? [Y/N] +``` + +To proceed with `--resync`, you must type 'y' or 'Y' to allow the application to continue. + +**Note:** It's highly recommended to use `--resync` only if the application prompts you to do so. Don't blindly set the application to start with `--resync` as the default option. + +**Note:** In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt. + +### Performing a --force-sync without a --resync or changing your configuration +In some cases and situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice. + +The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`. 
+ +To use this option, you must run the application manually in the following manner: +```text +onedrive --sync --single-directory '' --force-sync +``` + +When using `--force-sync`, you'll encounter the following warning and advice: +```text +WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used + +Using --force-sync will reconfigure the application to use defaults. This may have unknown future impacts. +By proceeding with this option, you accept any impacts, including potential data loss resulting from using --force-sync. + +Are you sure you want to proceed with --force-sync [Y/N] +``` + +To proceed with `--force-sync`, you must type 'y' or 'Y' to allow the application to continue. + +### Enabling the Client Activity Log +When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` and will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client to allow easy sorting of user to client activity log. 
+ +**Note:** You will need to ensure the existence of this directory and that your user has the applicable permissions to write to this directory; otherwise, the following error message will be printed: +```text +ERROR: Unable to access /var/log/onedrive +ERROR: Please manually create '/var/log/onedrive' and set appropriate permissions to allow write access +ERROR: The requested client activity log will instead be located in your user's home directory +``` + +On many systems, this can be achieved by performing the following: +```text +sudo mkdir /var/log/onedrive +sudo chown root:users /var/log/onedrive +sudo chmod 0775 /var/log/onedrive +``` + +Additionally, you need to ensure that your user account is part of the 'users' group: +``` +cat /etc/group | grep users +``` + +If your user is not part of this group, then you need to add your user to this group: +``` +sudo usermod -a -G users +``` + +If you need to make a group modification, you will need to 'logout' of all sessions / SSH sessions to log in again to have the new group access applied. + +If the client is unable to write the client activity log, the following error message will be printed: +```text +ERROR: Unable to write the activity log to /var/log/onedrive/%username%.onedrive.log +ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account +ERROR: The requested client activity log will instead be located in your user's home directory +``` + +If you receive this error message, you will need to diagnose why your system cannot write to the specified file location. 
+ +#### Client Activity Log Example: +An example of a client activity log for the command `onedrive --sync --enable-logging` is below: +```text +2023-Sep-27 08:16:00.1128806 Configuring Global Azure AD Endpoints +2023-Sep-27 08:16:00.1160620 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:16:00.5227122 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:16:00.5227977 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:00.7780979 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:00.7781548 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:16:00.7785889 Scanning the local file system '~/OneDrive' for new data to upload ... +2023-Sep-27 08:16:00.7813710 Performing a final true-up scan of online data from Microsoft OneDrive +2023-Sep-27 08:16:00.7814668 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:01.0141776 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:01.0142454 Sync with Microsoft OneDrive is complete +``` +An example of a client activity log for the command `onedrive --sync --verbose --enable-logging` is below: +```text +2023-Sep-27 08:20:05.4600464 Checking Application Version ... +2023-Sep-27 08:20:05.5235017 Attempting to initialise the OneDrive API ... +2023-Sep-27 08:20:05.5237207 Configuring Global Azure AD Endpoints +2023-Sep-27 08:20:05.5238087 The OneDrive API was initialised successfully +2023-Sep-27 08:20:05.5238536 Opening the item database ... 
+2023-Sep-27 08:20:05.5270612 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:20:05.9226535 Application version: vX.Y.Z-A-bcdefghi +2023-Sep-27 08:20:05.9227079 Account Type: +2023-Sep-27 08:20:05.9227360 Default Drive ID: +2023-Sep-27 08:20:05.9227550 Default Root ID: +2023-Sep-27 08:20:05.9227862 Remaining Free Space: +2023-Sep-27 08:20:05.9228296 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:20:05.9228989 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:20:06.2076569 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:20:06.2077121 Processing DB entries for this Drive ID: +2023-Sep-27 08:20:06.2078408 Processing ~/OneDrive +2023-Sep-27 08:20:06.2078739 The directory has not changed +2023-Sep-27 08:20:06.2079783 Processing Attachments +2023-Sep-27 08:20:06.2080071 The directory has not changed +2023-Sep-27 08:20:06.2081585 Processing Attachments/file.docx +2023-Sep-27 08:20:06.2082079 The file has not changed +2023-Sep-27 08:20:06.2082760 Processing Documents +2023-Sep-27 08:20:06.2083225 The directory has not changed +2023-Sep-27 08:20:06.2084284 Processing Documents/file.log +2023-Sep-27 08:20:06.2084886 The file has not changed +2023-Sep-27 08:20:06.2085150 Scanning the local file system '~/OneDrive' for new data to upload ... 
+2023-Sep-27 08:20:06.2087133 Skipping item - excluded by sync_list config: ./random_25k_files +2023-Sep-27 08:20:06.2116235 Performing a final true-up scan of online data from Microsoft OneDrive +2023-Sep-27 08:20:06.2117190 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:20:06.5049743 Sync with Microsoft OneDrive is complete +``` + +#### Client Activity Log Differences +Despite application logging being enabled as early as possible, the following log entries will be missing from the client activity log when compared to console output: + +**No user configuration file:** +```text +No user or system config file found, using application defaults +Using 'user' configuration path for application state data: /home/user/.config/onedrive +Using the following path to store the runtime application log: /var/log/onedrive +``` +**User configuration file:** +```text +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +Using 'user' configuration path for application state data: /home/user/.config/onedrive +Using the following path to store the runtime application log: /var/log/onedrive +``` + +### GUI Notifications +If notification support has been compiled in (refer to GUI Notification Support in install.md .. 
ADD LINK LATER), the following events will trigger a GUI notification within the display manager session: +* Aborting a sync if .nosync file is found +* Skipping a particular item due to an invalid name +* Skipping a particular item due to an invalid symbolic link +* Skipping a particular item due to an invalid UTF sequence +* Skipping a particular item due to an invalid character encoding sequence +* Cannot create remote directory +* Cannot upload file changes (free space issue, breaches maximum allowed size, breaches maximum OneDrive Account path length) +* Cannot delete remote file / folder +* Cannot move remote file / folder +* When a re-authentication is required +* When a new client version is available +* Files that fail to upload +* Files that fail to download + +### Handling a Microsoft OneDrive Account Password Change +If you change your Microsoft OneDrive Account Password, the client will no longer be authorised to sync, and will generate the following error upon next application run: +```text +AADSTS50173: The provided grant has expired due to it being revoked, a fresh auth token is needed. The user might have changed or reset their password. The grant was issued on '' and the TokensValidFrom date (before which tokens are not valid) for this user is ''. + +ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token. +``` + +To re-authorise the client, follow the steps below: +1. If running the client as a system service (init.d or systemd), stop the applicable system service +2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. Please note, if you are using `--confdir` as part of your application runtime configuration, you must include this when telling the client to re-authenticate. +3. 
Restart the client if running as a system service or perform the standalone sync operation again + +The application will now sync with OneDrive with the new credentials. + +### Determining the synchronisation result +When the client has finished syncing without errors, the following will be displayed: +``` +Sync with Microsoft OneDrive is complete +``` + +If any items failed to sync, the following will be displayed: +``` +Sync with Microsoft OneDrive has completed, however there are items that failed to sync. +``` +A file list of either upload or download items will be then listed to allow you to determine your next steps. + +In order to fix the upload or download failures, you may need to re-try your command and perform a resync to ensure your system is correctly synced with your Microsoft OneDrive Account. + +## Frequently Asked Configuration Questions + +### How to change the default configuration of the client? +Configuration is determined by three layers, and applied in the following order: +* Application default values +* Values that are set in the configuration file +* Values that are passed in via the command line at application runtime. These values will override any configuration file set value. + +The default application values provide a reasonable operational default, and additional configuration is entirely optional. + +If you want to change the application defaults, you can download a copy of the config file into your application configuration directory. Valid default directories for the config file are: +* `~/.config/onedrive` +* `/etc/onedrive` + +**Example:** To download a copy of the config file, use the following: +```text +mkdir -p ~/.config/onedrive +wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config +``` + +For full configuration options and CLI switches, please refer to application-config-options.md + +### How to change where my data from Microsoft OneDrive is stored? 
+By default, the location where your Microsoft OneDrive data is stored, is within your Home Directory under a directory called 'OneDrive'. This replicates as close as possible where the Microsoft Windows OneDrive client stores data. + +To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored. + +**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible when using 'Monitor Mode'. Local filesystem changes will be replicated between the local filesystem and Microsoft OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix. + +### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive? +The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive: +* Directories: 700 - This provides the following permissions: `drwx------` +* Files: 600 - This provides the following permissions: `-rw-------` + +These default permissions align to the security principal of 'least privilege' so that only you should have access to your data that you download from Microsoft OneDrive. + +To alter these default permissions, you can adjust the values of two configuration options as follows. You can also use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. +```text +sync_dir_permissions = "700" +sync_file_permissions = "600" +``` + +**Important:** Please note that special permission bits such as setuid, setgid, and the sticky bit are not supported. 
Valid permission values range from `000` to `777` only. + +### How to only sync a specific directory? +There are two methods to achieve this: +* Employ the '--single-directory' option to only sync this specific path +* Employ 'sync_list' as part of your 'config' file to configure what files and directories to sync, and what should be excluded + +### How to 'skip' files from syncing? +There are two methods to achieve this: +* Employ 'skip_file' as part of your 'config' file to configure what files to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded + +### How to 'skip' directories from syncing? +There are three methods available to 'skip' a directory from the sync process: +* Employ 'skip_dir' as part of your 'config' file to configure what directories to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded +* Employ 'check_nosync' as part of your 'config' file and a '.nosync' empty file within the directory to exclude to skip that directory + +### How to 'skip' dot files and folders from syncing? +There are three methods to achieve this: +* Employ 'skip_file' or 'skip_dir' to configure what files or folders to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded +* Employ 'skip_dotfiles' as part of your 'config' file to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive + +### How to 'skip' files larger than a certain size from syncing? +Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped. + +### How to 'rate limit' the application to control bandwidth consumed for upload & download operations? +To minimise the Internet bandwidth for upload and download operations, you can add the 'rate_limit' configuration option as part of your 'config' file. 
+ +The default value is '0' which means use all available bandwidth for the application. + +The value being used can be reviewed when using `--display-config`. + +### How can I prevent my local disk from filling up? +By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of disk space. + +This default value can be modified by adding the 'space_reservation' configuration option and the applicable value as part of your 'config' file. + +You can review the value being used when using `--display-config`. + +### How does the client handle symbolic links? +Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. + +As such, there are only two methods to support symbolic links with this client: +1. Follow the Linux symbolic link and upload whatever the local symbolic link is pointing to to Microsoft OneDrive. This is the default behaviour. +2. Skip symbolic links by configuring the application to do so. When skipping, no data, no link, no reference is uploaded to OneDrive. + +Use 'skip_symlinks' as part of your 'config' file to configure the skipping of all symbolic links while syncing. + +### How to synchronise shared folders (OneDrive Personal)? +Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive". + +### How to synchronise shared folders (OneDrive Business or Office 365)? +Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive". 
+ +Refer to [./business-shared-folders.md](business-shared-folders.md) for further details. + +### How to synchronise SharePoint / Office 365 Shared Libraries? +There are two methods to achieve this: +* SharePoint library can be directly added to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the SharePoint Library you want to synchronise, and then click on "Add to my OneDrive". +* Configure a separate application instance to only synchronise that specific SharePoint Library. Refer to [./sharepoint-libraries.md](sharepoint-libraries.md) for configuration assistance. + +### How to Create a Shareable Link? +In certain situations, you might want to generate a shareable file link and provide this link to other users for accessing a specific file. + +To accomplish this, employ the following command: +```text +onedrive --create-share-link +``` +**Note:** By default, the access permissions for the file link will be read-only. + +To make it a read-write link, execute the following command: +```text +onedrive --create-share-link --with-editing-perms +``` +**Note:** The order of the file path and option flag is crucial. + +### How to Synchronise Both Personal and Business Accounts at once? +You need to set up separate instances of the application configuration for each account. + +Refer to [./advanced-usage.md](advanced-usage.md) for guidance on configuration. + +### How to Synchronise Multiple SharePoint Libraries simultaneously? +For each SharePoint Library, configure a separate instance of the application configuration. + +Refer to [./advanced-usage.md](advanced-usage.md) for configuration instructions. + +### How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period? +When operating in 'Monitor Mode,' it may be advantageous to receive real-time updates to online data.
A 'webhook' is the method to achieve this, so that when in 'Monitor Mode,' the client subscribes to remote updates. + +Remote changes can then be promptly synchronised to your local file system, without waiting for the next synchronisation cycle. + +This is accomplished by: +* Using 'webhook_enabled' as part of your 'config' file to enable this feature +* Using 'webhook_public_url' as part of your 'config' file to configure the URL the webhook will use for subscription updates + +### How to initiate the client as a background service? +There are a few ways to employ onedrive as a service: +* via init.d +* via systemd +* via runit + +#### OneDrive service running as root user via init.d +```text +chkconfig onedrive on +service onedrive start +``` +To view the logs, execute: +```text +tail -f /var/log/onedrive/.onedrive.log +``` +To alter the 'user' under which the client operates (typically root by default), manually modify the init.d service file and adjust `daemon --user root onedrive_service.sh` to match the correct user. + +#### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora) +Initially, switch to the root user with `su - root`, then activate the systemd service: +```text +systemctl --user enable onedrive +systemctl --user start onedrive +``` +**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below. + +**Note:** This will execute the 'onedrive' process with a UID/GID of '0', which means any files or folders created will be owned by 'root'. + +To monitor the service's status, use the following: +```text +systemctl --user status onedrive.service +``` + +To observe the systemd application logs, use: +```text +journalctl --user-unit=onedrive -f +``` + +**Note:** For systemd to function correctly, it requires the presence of XDG environment variables. 
If you encounter the following error while enabling the systemd service: +```text +Failed to connect to bus: No such file or directory +``` +The most likely cause is missing XDG environment variables. To resolve this, add the following lines to `.bashrc` or another file executed upon user login: +```text +export XDG_RUNTIME_DIR="/run/user/$UID" +export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus" +``` + +To apply this change, you must log out of all user accounts where it has been made. + +**Note:** On certain systems (e.g., Raspbian / Ubuntu / Debian on Raspberry Pi), the XDG fix above may not persist after system reboots. An alternative to starting the client via systemd as root is as follows: +1. Create a symbolic link from `/home/root/.config/onedrive` to `/root/.config/onedrive/`. +2. Establish a systemd service using the '@' service file: `systemctl enable onedrive@root.service`. +3. Start the root@service: `systemctl start onedrive@root.service`. + +This ensures that the service correctly restarts upon system reboot. + +To examine the systemd application logs, run: +```text +journalctl --unit=onedrive@ -f +``` + +#### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux) +```text +systemctl enable onedrive +systemctl start onedrive +``` +**Note:** This will execute the 'onedrive' process with a UID/GID of '0', meaning any files or folders created will be owned by 'root'. + +To view the systemd application logs, execute: +```text +journalctl --unit=onedrive -f +``` + +#### OneDrive service running as a non-root user via systemd (All Linux Distributions) +In some instances, it is preferable to run the OneDrive client as a service without the 'root' user. Follow the instructions below to configure the service for your regular user login. + +1. 
As the user who will run the service, launch the application in standalone mode, authorize it for use, and verify that synchronization is functioning as expected: +```text +onedrive --sync --verbose +``` +2. After validating the application for your user, switch to the 'root' user, where is your username from step 1 above. +```text +systemctl enable onedrive@.service +systemctl start onedrive@.service +``` +3. To check the service's status for the user, use the following: +```text +systemctl status onedrive@.service +``` + +To observe the systemd application logs, use: +```text +journalctl --unit=onedrive@ -f +``` + +#### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora) +In some scenarios, you may want to receive GUI notifications when using the client as a non-root user. In this case, follow these steps: + +1. Log in via the graphical UI as the user you want to enable the service for. +2. Disable any `onedrive@` service files for your username, e.g.: +```text +sudo systemctl stop onedrive@alex.service +sudo systemctl disable onedrive@alex.service +``` +3. Enable the service as follows: +```text +systemctl --user enable onedrive +systemctl --user start onedrive +``` + +To check the service's status for the user, use the following: +```text +systemctl --user status onedrive.service +``` + +To view the systemd application logs, execute: +```text +journalctl --user-unit=onedrive -f +``` + +**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms. + +#### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void) + +1. Create the following folder if it doesn't already exist: `/etc/sv/runsvdir-` + + - where `` is the `USER` targeted for the service + - e.g., `# mkdir /etc/sv/runsvdir-nolan` + +2. 
Create a file called `run` under the previously created folder with executable permissions + + - `# touch /etc/sv/runsvdir-/run` + - `# chmod 0755 /etc/sv/runsvdir-/run` + +3. Edit the `run` file with the following contents (permissions needed): + + ```sh + #!/bin/sh + export USER="" + export HOME="/home/" + + groups="$(id -Gn "${USER}" | tr ' ' ':')" + svdir="${HOME}/service" + + exec chpst -u "${USER}:${groups}" runsvdir "${svdir}" + ``` + + - Ensure you replace `` with the `USER` set in step #1. + +4. Enable the previously created folder as a service + + - `# ln -fs /etc/sv/runsvdir- /var/service/` + +5. Create a subfolder in the `USER`'s `HOME` directory to store the services (or symlinks) + + - `$ mkdir ~/service` + +6. Create a subfolder specifically for OneDrive + + - `$ mkdir ~/service/onedrive/` + +7. Create a file called `run` under the previously created folder with executable permissions + + - `$ touch ~/service/onedrive/run` + - `$ chmod 0755 ~/service/onedrive/run` + +8. Append the following contents to the `run` file + + ```sh + #!/usr/bin/env sh + exec /usr/bin/onedrive --monitor + ``` + + - In some scenarios, the path to the `onedrive` binary may vary. You can obtain it by running `$ command -v onedrive`. + +9. Reboot to apply the changes + +10. Check the status of user-defined services + + - `$ sv status ~/service/*` + +For additional details, you can refer to Void's documentation on [Per-User Services](https://docs.voidlinux.org/config/services/user-services.html). + +### How to start a user systemd service at boot without user login? +In some situations, it may be necessary for the systemd service to start without requiring your 'user' to log in. 
+ +To address this issue, you need to reconfigure your 'user' account so that the systemd services you've created launch without the need for you to log in to your system: +```text +loginctl enable-linger +``` \ No newline at end of file diff --git a/README.md b/readme.md similarity index 63% rename from README.md rename to readme.md index 28b663595..7cf3697ca 100644 --- a/README.md +++ b/readme.md @@ -5,11 +5,13 @@ [![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml) [![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive) -A free Microsoft OneDrive Client which supports OneDrive Personal, OneDrive for Business, OneDrive for Office365 and SharePoint. +Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries. -This powerful and highly configurable client can run on all major Linux distributions, FreeBSD, or as a Docker container. It supports one-way and two-way sync capabilities and securely connects to Microsoft OneDrive services. +This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services. -This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) client, which the developer has confirmed he has no desire to maintain or support the client ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). This fork has been in active development since mid 2018. 
+Originally derived as a 'fork' from the [skilion](https://github.com/skilion/onedrive) client, it's worth noting that the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). + +This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018. ## Features * State caching @@ -26,6 +28,7 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl * Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China) * Supports single & multi-tenanted applications * Supports rate limiting of traffic +* Supports multi-threaded uploads and downloads ## What's missing * Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive @@ -36,28 +39,17 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl * Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log) * System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray) -## Supported Application Version -Only the current application release version or greater is supported. - -The current application release version is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) - -Check the version of the application you are using `onedrive --version` and ensure that you are running either the current release or compile the application yourself from master to get the latest version. 
- -If you are not using the above application version or greater, you must upgrade your application to obtain support. - -## Have a Question -If you have a question or need something clarified, please raise a new disscussion post [here](https://github.com/abraunegg/onedrive/discussions) - -Be sure to review the Frequently Asked Questions as well before raising a new discussion post. - ## Frequently Asked Questions Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions) +## Have a question +If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions) + ## Reporting an Issue or Bug -If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to: +If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to: -1. Check the version of the application you are using `onedrive --version` and ensure that you are running a supported application version. If you are not using a supported application version, you must first upgrade your application to a supported version and then re-test for your issue. -2. If you are using a supported applcation version, fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md) +1. Check the version of the application you are using `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or built from master. +2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md) 3.
Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support) * If you are in *any* way concerned regarding the sensitivity of the data contained with in the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue * If you are still concerned, provide an NDA or confidentiality document to sign @@ -70,23 +62,23 @@ Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/maste ## Documentation and Configuration Assistance ### Installing from Distribution Packages or Building the OneDrive Client for Linux from source -Refer to [docs/INSTALL.md](https://github.com/abraunegg/onedrive/blob/master/docs/INSTALL.md) +Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md) ### Configuration and Usage -Refer to [docs/USAGE.md](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md) +Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md) ### Configure OneDrive Business Shared Folders -Refer to [docs/BusinessSharedFolders.md](https://github.com/abraunegg/onedrive/blob/master/docs/BusinessSharedFolders.md) +Refer to [docs/business-shared-folders.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-folders.md) ### Configure SharePoint / Office 365 Shared Libraries (Business or Education) -Refer to [docs/SharePoint-Shared-Libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/SharePoint-Shared-Libraries.md) +Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md) ### Configure National Cloud support Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md) ### Docker support -Refer to 
[docs/Docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/Docker.md) +Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md) ### Podman support -Refer to [docs/Podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/Podman.md) +Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md) diff --git a/src/clientSideFiltering.d b/src/clientSideFiltering.d new file mode 100644 index 000000000..ed67c629e --- /dev/null +++ b/src/clientSideFiltering.d @@ -0,0 +1,384 @@ +// What is this module called? +module clientSideFiltering; + +// What does this module require to function? +import std.algorithm; +import std.array; +import std.file; +import std.path; +import std.regex; +import std.stdio; +import std.string; + +// What other modules that we have created do we need to import? +import config; +import util; +import log; + +class ClientSideFiltering { + // Class variables + ApplicationConfig appConfig; + string[] paths; + string[] businessSharedItemsList; + Regex!char fileMask; + Regex!char directoryMask; + bool skipDirStrictMatch = false; + bool skipDotfiles = false; + + this(ApplicationConfig appConfig) { + // Configure the class varaible to consume the application configuration + this.appConfig = appConfig; + } + + // Initialise the required items + bool initialise() { + // + log.vdebug("Configuring Client Side Filtering (Selective Sync)"); + // Load the sync_list file if it exists + if (exists(appConfig.syncListFilePath)){ + loadSyncList(appConfig.syncListFilePath); + } + + // Load the Business Shared Items file if it exists + if (exists(appConfig.businessSharedItemsFilePath)){ + loadBusinessSharedItems(appConfig.businessSharedItemsFilePath); + } + + // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries + // Handle skip_dir configuration in config file + log.vdebug("Configuring skip_dir ..."); + log.vdebug("skip_dir: ", 
appConfig.getValueString("skip_dir")); + setDirMask(appConfig.getValueString("skip_dir")); + + // Was --skip-dir-strict-match configured? + log.vdebug("Configuring skip_dir_strict_match ..."); + log.vdebug("skip_dir_strict_match: ", appConfig.getValueBool("skip_dir_strict_match")); + if (appConfig.getValueBool("skip_dir_strict_match")) { + setSkipDirStrictMatch(); + } + + // Was --skip-dot-files configured? + log.vdebug("Configuring skip_dotfiles ..."); + log.vdebug("skip_dotfiles: ", appConfig.getValueBool("skip_dotfiles")); + if (appConfig.getValueBool("skip_dotfiles")) { + setSkipDotfiles(); + } + + // Handle skip_file configuration in config file + log.vdebug("Configuring skip_file ..."); + // Validate skip_file to ensure that this does not contain an invalid configuration + // Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process. + foreach(entry; appConfig.getValueString("skip_file").split("|")){ + if (entry == ".*") { + // invalid entry element detected + log.error("ERROR: Invalid skip_file entry '.*' detected"); + return false; + } + } + + // All skip_file entries are valid + log.vdebug("skip_file: ", appConfig.getValueString("skip_file")); + setFileMask(appConfig.getValueString("skip_file")); + + // All configured OK + return true; + } + + // Load sync_list file if it exists + void loadSyncList(string filepath) { + // open file as read only + auto file = File(filepath, "r"); + auto range = file.byLine(); + foreach (line; range) { + // Skip comments in file + if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; + paths ~= buildNormalizedPath(line); + } + file.close(); + } + + // load business_shared_folders file + void loadBusinessSharedItems(string filepath) { + // open file as read only + auto file = File(filepath, "r"); + auto range = file.byLine(); + foreach (line; range) { + // Skip comments in file + if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; + 
businessSharedItemsList ~= buildNormalizedPath(line); + } + file.close(); + } + + // Configure the regex that will be used for 'skip_file' + void setFileMask(const(char)[] mask) { + fileMask = wild2regex(mask); + log.vdebug("Selective Sync File Mask: ", fileMask); + } + + // Configure the regex that will be used for 'skip_dir' + void setDirMask(const(char)[] dirmask) { + directoryMask = wild2regex(dirmask); + log.vdebug("Selective Sync Directory Mask: ", directoryMask); + } + + // Configure skipDirStrictMatch if function is called + // By default, skipDirStrictMatch = false; + void setSkipDirStrictMatch() { + skipDirStrictMatch = true; + } + + // Configure skipDotfiles if function is called + // By default, skipDotfiles = false; + void setSkipDotfiles() { + skipDotfiles = true; + } + + // return value of skipDotfiles + bool getSkipDotfiles() { + return skipDotfiles; + } + + // Match against sync_list only + bool isPathExcludedViaSyncList(string path) { + // Debug output that we are performing a 'sync_list' inclusion / exclusion test + return isPathExcluded(path, paths); + } + + // config file skip_dir parameter + bool isDirNameExcluded(string name) { + // Does the directory name match skip_dir config entry? + // Returns true if the name matches a skip_dir config entry + // Returns false if no match + log.vdebug("skip_dir evaluation for: ", name); + + // Try full path match first + if (!name.matchFirst(directoryMask).empty) { + log.vdebug("'!name.matchFirst(directoryMask).empty' returned true = matched"); + return true; + } else { + // Do we check the base name as well? 
+ if (!skipDirStrictMatch) { + log.vdebug("No Strict Matching Enforced"); + + // Test the entire path working backwards from child + string path = buildNormalizedPath(name); + string checkPath; + auto paths = pathSplitter(path); + + foreach_reverse(directory; paths) { + if (directory != "/") { + // This will add a leading '/' but that needs to be stripped to check + checkPath = "/" ~ directory ~ checkPath; + if(!checkPath.strip('/').matchFirst(directoryMask).empty) { + log.vdebug("'!checkPath.matchFirst(directoryMask).empty' returned true = matched"); + return true; + } + } + } + } else { + log.vdebug("Strict Matching Enforced - No Match"); + } + } + // no match + return false; + } + + // config file skip_file parameter + bool isFileNameExcluded(string name) { + // Does the file name match skip_file config entry? + // Returns true if the name matches a skip_file config entry + // Returns false if no match + log.vdebug("skip_file evaluation for: ", name); + + // Try full path match first + if (!name.matchFirst(fileMask).empty) { + return true; + } else { + // check just the file name + string filename = baseName(name); + if(!filename.matchFirst(fileMask).empty) { + return true; + } + } + // no match + return false; + } + + // test if the given path is not included in the allowed paths + // if there are no allowed paths always return false + private bool isPathExcluded(string path, string[] allowedPaths) { + // function variables + bool exclude = false; + bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry + bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry + bool finalResult = true; // will get updated to false, if pattern match to sync_list entry + int offset; + string wildcard = "*"; + + // always allow the root + if (path == ".") return false; + // if there are no allowed paths always return false + if (allowedPaths.empty) return false; + path = 
buildNormalizedPath(path); + log.vdebug("Evaluation against 'sync_list' for this path: ", path); + log.vdebug("[S]exclude = ", exclude); + log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch); + log.vdebug("[S]excludeMatched = ", excludeMatched); + + // unless path is an exact match, entire sync_list entries need to be processed to ensure + // negative matches are also correctly detected + foreach (allowedPath; allowedPaths) { + // is this an inclusion path or finer grained exclusion? + switch (allowedPath[0]) { + case '-': + // sync_list path starts with '-', this user wants to exclude this path + exclude = true; + // If the sync_list entry starts with '-/' offset needs to be 2, else 1 + if (startsWith(allowedPath, "-/")){ + // Offset needs to be 2 + offset = 2; + } else { + // Offset needs to be 1 + offset = 1; + } + break; + case '!': + // sync_list path starts with '!', this user wants to exclude this path + exclude = true; + // If the sync_list entry starts with '!/' offset needs to be 2, else 1 + if (startsWith(allowedPath, "!/")){ + // Offset needs to be 2 + offset = 2; + } else { + // Offset needs to be 1 + offset = 1; + } + break; + case '/': + // sync_list path starts with '/', this user wants to include this path + // but a '/' at the start causes matching issues, so use the offset for comparison + exclude = false; + offset = 1; + break; + + default: + // no negative pattern, default is to not exclude + exclude = false; + offset = 0; + } + + // What are we comparing against? + log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath); + + // Generate the common prefix from the path vs the allowed path + auto comm = commonPrefix(path, allowedPath[offset..$]); + + // Is path is an exact match of the allowed path? 
+ if (comm.length == path.length) { + // we have a potential exact match + // strip any potential '/*' from the allowed path, to avoid a potential lesser common match + string strippedAllowedPath = strip(allowedPath[offset..$], "/*"); + + if (path == strippedAllowedPath) { + // we have an exact path match + log.vdebug("exact path match"); + if (!exclude) { + log.vdebug("Evaluation against 'sync_list' result: direct match"); + finalResult = false; + // direct match, break and go sync + break; + } else { + log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded"); + // do not set excludeMatched = true here, otherwise parental path also gets excluded + // flag exludeDirectMatch so that a 'wildcard match' will not override this exclude + exludeDirectMatch = true; + // final result + finalResult = true; + } + } else { + // no exact path match, but something common does match + log.vdebug("something 'common' matches the input path"); + auto splitAllowedPaths = pathSplitter(strippedAllowedPath); + string pathToEvaluate = ""; + foreach(base; splitAllowedPaths) { + pathToEvaluate ~= base; + if (path == pathToEvaluate) { + // The input path matches what we want to evaluate against as a direct match + if (!exclude) { + log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item"); + finalResult = false; + // direct match, break and go sync + break; + } else { + log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded"); + finalResult = true; + // do not set excludeMatched = true here, otherwise parental path also gets excluded + } + } + pathToEvaluate ~= dirSeparator; + } + } + } + + // Is path is a subitem/sub-folder of the allowed path? 
+ if (comm.length == allowedPath[offset..$].length) { + // The given path is potentially a subitem of an allowed path + // We want to capture sub-folders / files of allowed paths here, but not explicitly match other items + // if there is no wildcard + auto subItemPathCheck = allowedPath[offset..$] ~ "/"; + if (canFind(path, subItemPathCheck)) { + // The 'path' includes the allowed path, and is 'most likely' a sub-path item + if (!exclude) { + log.vdebug("Evaluation against 'sync_list' result: parental path match"); + finalResult = false; + // parental path matches, break and go sync + break; + } else { + log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded"); + finalResult = true; + excludeMatched = true; + } + } + } + + // Does the allowed path contain a wildcard? (*) + if (canFind(allowedPath[offset..$], wildcard)) { + // allowed path contains a wildcard + // manually replace '*' for '.*' to be compatible with regex + string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*"); + auto allowedMask = regex(regexCompatiblePath); + if (matchAll(path, allowedMask)) { + // regex wildcard evaluation matches + // if we have a prior pattern match for an exclude, excludeMatched = true + if (!exclude && !excludeMatched && !exludeDirectMatch) { + // nothing triggered an exclusion before evaluation against wildcard match attempt + log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match"); + finalResult = false; + } else { + log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded"); + finalResult = true; + excludeMatched = true; + } + } + } + } + // Interim results + log.vdebug("[F]exclude = ", exclude); + log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch); + log.vdebug("[F]excludeMatched = ", excludeMatched); + + // If exclude or excludeMatched is true, then finalResult has to be true + if ((exclude) || (excludeMatched) || (exludeDirectMatch)) { + finalResult = 
true; + } + + // results + if (finalResult) { + log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED"); + } else { + log.vdebug("Evaluation against 'sync_list' final result: included for sync"); + } + return finalResult; + } +} \ No newline at end of file diff --git a/src/config.d b/src/config.d index 8c9ba2ff9..dbb40bb11 100644 --- a/src/config.d +++ b/src/config.d @@ -1,130 +1,208 @@ +// What is this module called? +module config; + +// What does this module require to function? import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; -import std.file, std.string, std.regex, std.stdio, std.process, std.algorithm.searching, std.getopt, std.conv, std.path; +import std.stdio; +import std.process; +import std.regex; +import std.string; +import std.algorithm.searching; import std.algorithm.sorting: sort; -import selective; -static import log; +import std.file; +import std.conv; +import std.path; +import std.getopt; +import std.format; +import std.ascii; +import std.datetime; -final class Config -{ - // application defaults - public string defaultSyncDir = "~/OneDrive"; - public string defaultSkipFile = "~*|.~*|*.tmp"; - public string defaultSkipDir = ""; - public string defaultLogFileDir = "/var/log/onedrive/"; - // application set items - public string refreshTokenFilePath = ""; - public string deltaLinkFilePath = ""; - public string databaseFilePath = ""; - public string databaseFilePathDryRun = ""; - public string uploadStateFilePath = ""; - public string syncListFilePath = ""; - public string homePath = ""; - public string configDirName = ""; - public string systemConfigDirName = ""; - public string configFileSyncDir = ""; - public string configFileSkipFile = ""; - public string configFileSkipDir = ""; - public string businessSharedFolderFilePath = ""; - private string userConfigFilePath = ""; - private string systemConfigFilePath = ""; - // was the application just authorised - paste of response uri - public bool applicationAuthorizeResponseUri = 
false; - // hashmap for the values found in the user config file - // ARGGGG D is stupid and cannot make hashmap initializations!!! - // private string[string] foobar = [ "aa": "bb" ] does NOT work!!! - private string[string] stringValues; - private bool[string] boolValues; - private long[string] longValues; - // Compile time regex - this does not change - public auto configRegex = ctRegex!(`^(\w+)\s*=\s*"(.*)"\s*$`); - // Default directory permission mode - public long defaultDirectoryPermissionMode = 700; - public int configuredDirectoryPermissionMode; - // Default file permission mode - public long defaultFilePermissionMode = 600; - public int configuredFilePermissionMode; - - // Bring in v2.5.0 config items +// What other modules that we have created do we need to import? +import log; +import util; + +class ApplicationConfig { + // Application default values - these do not change + // - Compile time regex + immutable auto configRegex = ctRegex!(`^(\w+)\s*=\s*"(.*)"\s*$`); + // - Default directory to store data + immutable string defaultSyncDir = "~/OneDrive"; + // - Default Directory Permissions + immutable long defaultDirectoryPermissionMode = 700; + // - Default File Permissions + immutable long defaultFilePermissionMode = 600; + // - Default types of files to skip + // v2.0.x - 2.4.x: ~*|.~*|*.tmp + // v2.5.x : ~*|.~*|*.tmp|*.swp|*.partial + immutable string defaultSkipFile = "~*|.~*|*.tmp|*.swp|*.partial"; + // - Default directories to skip (default is skip none) + immutable string defaultSkipDir = ""; + // - Default log directory + immutable string defaultLogFileDir = "/var/log/onedrive"; + // - Default configuration directory + immutable string defaultConfigDirName = "~/.config/onedrive"; + + // Microsoft Requirements + // - Default Application ID (abraunegg) + immutable string defaultApplicationId = "d50ca740-c83f-4d1b-b616-12c519384f0c"; + // - Microsoft User Agent ISV Tag + immutable string isvTag = "ISV"; + // - Microsoft User Agent Company name + 
immutable string companyName = "abraunegg"; + // - Microsoft Application name as per Microsoft Azure application registration + immutable string appTitle = "OneDrive Client for Linux"; + // Comply with OneDrive traffic decoration requirements + // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online + // - Identify as ISV and include Company Name, App Name separated by a pipe character and then adding Version number separated with a slash character + + //immutable string defaultUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ strip(import("version")); + immutable string defaultUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ "v2.5.0-alpha-2"; // HTTP Struct items, used for configuring HTTP() // Curl Timeout Handling // libcurl dns_cache_timeout timeout immutable int defaultDnsTimeout = 60; // Connect timeout for HTTP|HTTPS connections - immutable int defaultConnectTimeout = 10; - // With the following settings we force - // - if there is no data flow for 10min, abort - // - if the download time for one item exceeds 1h, abort - // - // Timeout for activity on connection - // this translates into Curl's CURLOPT_LOW_SPEED_TIME - // which says: - // It contains the time in number seconds that the - // transfer speed should be below the CURLOPT_LOW_SPEED_LIMIT - // for the library to consider it too slow and abort. - immutable int defaultDataTimeout = 600; + immutable int defaultConnectTimeout = 30; + // Default data timeout for HTTP + // curl.d has a default of: _defaultDataTimeout = dur!"minutes"(2); + immutable int defaultDataTimeout = 240; // Maximum time any operation is allowed to take // This includes dns resolution, connecting, data transfer, etc. 
immutable int defaultOperationTimeout = 3600; - // Specify how many redirects should be allowed - immutable int defaultMaxRedirects = 5; // Specify what IP protocol version should be used when communicating with OneDrive immutable int defaultIpProtocol = 0; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + // Specify how many redirects should be allowed + immutable int defaultMaxRedirects = 5; + + // Azure Active Directory & Graph Explorer Endpoints + // - Global & Default + immutable string globalAuthEndpoint = "https://login.microsoftonline.com"; + immutable string globalGraphEndpoint = "https://graph.microsoft.com"; + // - US Government L4 + immutable string usl4AuthEndpoint = "https://login.microsoftonline.us"; + immutable string usl4GraphEndpoint = "https://graph.microsoft.us"; + // - US Government L5 + immutable string usl5AuthEndpoint = "https://login.microsoftonline.us"; + immutable string usl5GraphEndpoint = "https://dod-graph.microsoft.us"; + // - Germany + immutable string deAuthEndpoint = "https://login.microsoftonline.de"; + immutable string deGraphEndpoint = "https://graph.microsoft.de"; + // - China + immutable string cnAuthEndpoint = "https://login.chinacloudapi.cn"; + immutable string cnGraphEndpoint = "https://microsoftgraph.chinacloudapi.cn"; + // Application items that depend on application run-time environment, thus cannot be immutable + // Public variables + // Was the application just authorised - paste of response uri + bool applicationAuthorizeResponseUri = false; + // Store the 'refresh_token' file path + string refreshTokenFilePath = ""; + // Store the refreshToken for use within the application + string refreshToken; + // Store the accessTokenExpiration for use within the application + SysTime accessTokenExpiration; + // Store the current accessToken for use within the application + string accessToken; - - this(string confdirOption) - { - // default configuration - entries in config file ~/.config/onedrive/config - // an entry here 
means it can be set via the config file if there is a coresponding entry, read from config and set via update_from_args() - stringValues["sync_dir"] = defaultSyncDir; - stringValues["skip_file"] = defaultSkipFile; - stringValues["skip_dir"] = defaultSkipDir; + // Store the 'session_upload.CRC32-HASH' file path + string uploadSessionFilePath = ""; + + bool apiWasInitialised = false; + bool syncEngineWasInitialised = false; + string accountType; + string defaultDriveId; + string defaultRootId; + ulong remainingFreeSpace = 0; + bool quotaAvailable = true; + bool quotaRestricted = false; + + bool fullScanTrueUpRequired = false; + bool surpressLoggingOutput = false; + + // This is the value that needs testing when we are actually downloading and uploading data + ulong concurrentThreads = 16; + + // All application run-time paths are formulated from this as a set of defaults + // - What is the home path of the actual 'user' that is running the application + string defaultHomePath = ""; + // - What is the config path for the application. 
By default, this is ~/.config/onedrive but can be overridden by using --confdir + private string configDirName = defaultConfigDirName; + // - In case we have to use a system config directory such as '/etc/onedrive' or similar, store that path in this variable + private string systemConfigDirName = ""; + // - Store the configured converted octal value for directory permissions + private int configuredDirectoryPermissionMode; + // - Store the configured converted octal value for file permissions + private int configuredFilePermissionMode; + // - Store the 'delta_link' file path + private string deltaLinkFilePath = ""; + // - Store the 'items.sqlite3' file path + string databaseFilePath = ""; + // - Store the 'items-dryrun.sqlite3' file path + string databaseFilePathDryRun = ""; + // - Store the user 'config' file path + private string userConfigFilePath = ""; + // - Store the system 'config' file path + private string systemConfigFilePath = ""; + // - What is the 'config' file path that will be used? 
+ private string applicableConfigFilePath = ""; + // - Store the 'sync_list' file path + string syncListFilePath = ""; + // - Store the 'business_shared_items' file path + string businessSharedItemsFilePath = ""; + + // Hash files so that we can detect when the configuration has changed, in items that will require a --resync + private string configHashFile = ""; + private string configBackupFile = ""; + private string syncListHashFile = ""; + private string businessSharedItemsHashFile = ""; + + // Store the actual 'runtime' hash + private string currentConfigHash = ""; + private string currentSyncListHash = ""; + private string currentBusinessSharedItemsHash = ""; + + // Store the previous config files hash values (file contents) + private string previousConfigHash = ""; + private string previousSyncListHash = ""; + private string previousBusinessSharedItemsHash = ""; + + // Store items that come in from the 'config' file, otherwise these need to be set the the defaults + private string configFileSyncDir = defaultSyncDir; + private string configFileSkipFile = defaultSkipFile; + private string configFileSkipDir = ""; // Default here is no directories are skipped + private string configFileDriveId = ""; // Default here is that no drive id is specified + private bool configFileSkipDotfiles = false; + private bool configFileSkipSymbolicLinks = false; + private bool configFileSyncBusinessSharedItems = false; + + // File permission values (set via initialise function) + private int convertedPermissionValue; + + // Array of values that are the actual application runtime configuration + // The values stored in these array's are the actual application configuration which can then be accessed by getValue & setValue + string[string] stringValues; + long[string] longValues; + bool[string] boolValues; + + bool shellEnvironmentSet = false; + + // Initialise the application configuration + bool initialise(string confdirOption) { + + // Default runtime configuration - entries in 
config file ~/.config/onedrive/config or derived from variables above + // An entry here means it can be set via the config file if there is a coresponding entry, read from config and set via update_from_args() + // The below becomes the 'default' application configuration before config file and/or cli options are overlayed on top + + // - Set the required default values + stringValues["application_id"] = defaultApplicationId; stringValues["log_dir"] = defaultLogFileDir; + stringValues["skip_dir"] = defaultSkipDir; + stringValues["skip_file"] = defaultSkipFile; + stringValues["sync_dir"] = defaultSyncDir; + stringValues["user_agent"] = defaultUserAgent; + // - The 'drive_id' is used when we specify a specific OneDrive ID when attempting to sync Shared Folders and SharePoint items stringValues["drive_id"] = ""; - stringValues["user_agent"] = ""; - boolValues["upload_only"] = false; - boolValues["check_nomount"] = false; - boolValues["check_nosync"] = false; - boolValues["download_only"] = false; - boolValues["disable_notifications"] = false; - boolValues["disable_download_validation"] = false; - boolValues["disable_upload_validation"] = false; - boolValues["enable_logging"] = false; - boolValues["force_http_11"] = false; - boolValues["local_first"] = false; - boolValues["no_remote_delete"] = false; - boolValues["skip_symlinks"] = false; - boolValues["debug_https"] = false; - boolValues["skip_dotfiles"] = false; - boolValues["dry_run"] = false; - boolValues["sync_root_files"] = false; - longValues["verbose"] = log.verbose; // might be initialized by the first getopt call! 
- // The amount of time (seconds) between monitor sync loops - longValues["monitor_interval"] = 300; - longValues["skip_size"] = 0; - longValues["min_notify_changes"] = 5; - longValues["monitor_log_frequency"] = 6; - // Number of N sync runs before performing a full local scan of sync_dir - // By default 12 which means every ~60 minutes a full disk scan of sync_dir will occur - // 'monitor_interval' * 'monitor_fullscan_frequency' = 3600 = 1 hour - longValues["monitor_fullscan_frequency"] = 12; - // Number of children in a path that is locally removed which will be classified as a 'big data delete' - longValues["classify_as_big_delete"] = 1000; - // Delete source after successful transfer - boolValues["remove_source_files"] = false; - // Strict matching for skip_dir - boolValues["skip_dir_strict_match"] = false; - // Allow for a custom Client ID / Application ID to be used to replace the inbuilt default - // This is a config file option ONLY - stringValues["application_id"] = ""; - // allow for resync to be set via config file - boolValues["resync"] = false; - // resync now needs to be acknowledged based on the 'risk' of using it - boolValues["resync_auth"] = false; - // Ignore data safety checks and overwrite local data rather than preserve & rename - // This is a config file option ONLY - boolValues["bypass_data_preservation"] = false; // Support National Azure AD endpoints as per https://docs.microsoft.com/en-us/graph/deployments // By default, if empty, use standard Azure AD URL's // Will support the following options: @@ -141,52 +219,31 @@ final class Config // AD Endpoint: https://login.chinacloudapi.cn // Graph Endpoint: https://microsoftgraph.chinacloudapi.cn stringValues["azure_ad_endpoint"] = ""; + // Support single-tenant applications that are not able to use the "common" multiplexer - stringValues["azure_tenant_id"] = "common"; - // Allow enable / disable of the syncing of OneDrive Business Shared Folders via configuration file - 
boolValues["sync_business_shared_folders"] = false; - // Configure the default folder permission attributes for newly created folders + stringValues["azure_tenant_id"] = ""; + // - Store how many times was --verbose added + longValues["verbose"] = log.verbose; // might also be initialised by the first getopt call! + // - The amount of time (seconds) between monitor sync loops + longValues["monitor_interval"] = 300; + // - What size of file should be skipped? + longValues["skip_size"] = 0; + // - How many 'loops' when using --monitor, before we print out high frequency recurring items? + longValues["monitor_log_frequency"] = 12; + // - Number of N sync runs before performing a full local scan of sync_dir + // By default 12 which means every ~60 minutes a full disk scan of sync_dir will occur + // 'monitor_interval' * 'monitor_fullscan_frequency' = 3600 = 1 hour + longValues["monitor_fullscan_frequency"] = 12; + // - Number of children in a path that is locally removed which will be classified as a 'big data delete' + longValues["classify_as_big_delete"] = 1000; + // - Configure the default folder permission attributes for newly created folders longValues["sync_dir_permissions"] = defaultDirectoryPermissionMode; - // Configure the default file permission attributes for newly created file + // - Configure the default file permission attributes for newly created file longValues["sync_file_permissions"] = defaultFilePermissionMode; - // Configure download / upload rate limits + // - Configure download / upload rate limits longValues["rate_limit"] = 0; - // To ensure we do not fill up the load disk, how much disk space should be reserved by default + // - To ensure we do not fill up the load disk, how much disk space should be reserved by default longValues["space_reservation"] = 50 * 2^^20; // 50 MB as Bytes - // Webhook options - boolValues["webhook_enabled"] = false; - stringValues["webhook_public_url"] = ""; - stringValues["webhook_listening_host"] = ""; - 
longValues["webhook_listening_port"] = 8888; - longValues["webhook_expiration_interval"] = 3600 * 24; - longValues["webhook_renewal_interval"] = 3600 * 12; - // Log to application output running configuration values - boolValues["display_running_config"] = false; - // Configure read-only authentication scope - boolValues["read_only_auth_scope"] = false; - // Flag to cleanup local files when using --download-only - boolValues["cleanup_local_files"] = false; - - // DEVELOPER OPTIONS - // display_memory = true | false - // - It may be desirable to display the memory usage of the application to assist with diagnosing memory issues with the application - // - This is especially beneficial when debugging or performing memory tests with Valgrind - boolValues["display_memory"] = false; - // monitor_max_loop = long value - // - It may be desirable to, when running in monitor mode, force monitor mode to 'quit' after X number of loops - // - This is especially beneficial when debugging or performing memory tests with Valgrind - longValues["monitor_max_loop"] = 0; - // display_sync_options = true | false - // - It may be desirable to see what options are being passed in to performSync() without enabling the full verbose debug logging - boolValues["display_sync_options"] = false; - // force_children_scan = true | false - // - Force client to use /children rather than /delta to query changes on OneDrive - // - This option flags nationalCloudDeployment as true, forcing the client to act like it is using a National Cloud Deployment - boolValues["force_children_scan"] = false; - // display_processing_time = true | false - // - Enabling this option will add function processing times to the console output - // - This then enables tracking of where the application is spending most amount of time when processing data when users have questions re performance - boolValues["display_processing_time"] = false; // HTTPS & CURL Operation Settings // - Maximum time an operation is allowed to 
take @@ -200,71 +257,163 @@ final class Config longValues["data_timeout"] = defaultDataTimeout; // What IP protocol version should be used when communicating with OneDrive longValues["ip_protocol_version"] = defaultIpProtocol; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - + + // - Do we wish to upload only? + boolValues["upload_only"] = false; + // - Do we need to check for the .nomount file on the mount point? + boolValues["check_nomount"] = false; + // - Do we need to check for the .nosync file anywhere? + boolValues["check_nosync"] = false; + // - Do we wish to download only? + boolValues["download_only"] = false; + // - Do we disable notifications? + boolValues["disable_notifications"] = false; + // - Do we bypass all the download validation? + // This is critically important not to disable, but because of SharePoint 'feature' can be highly desirable to enable + boolValues["disable_download_validation"] = false; + // - Do we bypass all the upload validation? + // This is critically important not to disable, but because of SharePoint 'feature' can be highly desirable to enable + boolValues["disable_upload_validation"] = false; + // - Do we enable logging? + boolValues["enable_logging"] = false; + // - Do we force HTTP 1.1 for connections to the OneDrive API + // By default we use the curl library default, which should be HTTP2 for most operations governed by the OneDrive API + boolValues["force_http_11"] = false; + // - Do we treat the local file system as the source of truth for our data? + boolValues["local_first"] = false; + // - Do we ignore local file deletes, so that all files are retained online? + boolValues["no_remote_delete"] = false; + // - Do we skip symbolic links? + boolValues["skip_symlinks"] = false; + // - Do we enable debugging for all HTTPS flows. Critically important for debugging API issues. + boolValues["debug_https"] = false; + // - Do we skip .files and .folders? 
+ boolValues["skip_dotfiles"] = false; + // - Do we perform a 'dry-run' with no local or remote changes actually being performed? + boolValues["dry_run"] = false; + // - Do we sync all the files in the 'sync_dir' root? + boolValues["sync_root_files"] = false; + // - Do we delete source after successful transfer? + boolValues["remove_source_files"] = false; + // - Do we perform strict matching for skip_dir? + boolValues["skip_dir_strict_match"] = false; + // - Do we perform a --resync? + boolValues["resync"] = false; + // - resync now needs to be acknowledged based on the 'risk' of using it + boolValues["resync_auth"] = false; + // - Ignore data safety checks and overwrite local data rather than preserve & rename + // This is a config file option ONLY + boolValues["bypass_data_preservation"] = false; + // - Allow enable / disable of the syncing of OneDrive Business Shared items (files & folders) via configuration file + boolValues["sync_business_shared_items"] = false; + // - Log to application output running configuration values + boolValues["display_running_config"] = false; + // - Configure read-only authentication scope + boolValues["read_only_auth_scope"] = false; + // - Flag to cleanup local files when using --download-only + boolValues["cleanup_local_files"] = false; + + // Webhook Feature Options + stringValues["webhook_public_url"] = ""; + stringValues["webhook_listening_host"] = ""; + longValues["webhook_listening_port"] = 8888; + longValues["webhook_expiration_interval"] = 3600 * 24; + longValues["webhook_renewal_interval"] = 3600 * 12; + boolValues["webhook_enabled"] = false; + + // Print in debug the application version as soon as possible + //log.vdebug("Application Version: ", strip(import("version"))); + string tempVersion = "v2.5.0-alpha-2" ~ " GitHub version: " ~ strip(import("version")); + log.vdebug("Application Version: ", tempVersion); + // EXPAND USERS HOME DIRECTORY // Determine the users home directory. 
// Need to avoid using ~ here as expandTilde() below does not interpret correctly when running under init.d or systemd scripts // Check for HOME environment variable if (environment.get("HOME") != ""){ // Use HOME environment variable - log.vdebug("homePath: HOME environment variable set"); - homePath = environment.get("HOME"); + log.vdebug("runtime_environment: HOME environment variable detected, expansion of '~' should be possible"); + defaultHomePath = environment.get("HOME"); + shellEnvironmentSet = true; } else { if ((environment.get("SHELL") == "") && (environment.get("USER") == "")){ // No shell is set or username - observed case when running as systemd service under CentOS 7.x - log.vdebug("homePath: WARNING - no HOME environment variable set"); - log.vdebug("homePath: WARNING - no SHELL environment variable set"); - log.vdebug("homePath: WARNING - no USER environment variable set"); - homePath = "/root"; + log.vdebug("runtime_environment: No HOME, SHELL or USER environment variable configuration detected. 
Expansion of '~' not possible"); + defaultHomePath = "/root"; + shellEnvironmentSet = false; + } else { // A shell & valid user is set, but no HOME is set, use ~ which can be expanded - log.vdebug("homePath: WARNING - no HOME environment variable set"); - homePath = "~"; + log.vdebug("runtime_environment: SHELL and USER environment variable detected, expansion of '~' should be possible"); + defaultHomePath = "~"; + shellEnvironmentSet = true; } } - - // Output homePath calculation - log.vdebug("homePath: ", homePath); - - // Determine the correct configuration directory to use + // outcome of setting defaultHomePath + log.vdebug("runtime_environment: Calculated defaultHomePath: ", defaultHomePath); + + // DEVELOPER OPTIONS + // display_memory = true | false + // - It may be desirable to display the memory usage of the application to assist with diagnosing memory issues with the application + // - This is especially beneficial when debugging or performing memory tests with Valgrind + boolValues["display_memory"] = false; + // monitor_max_loop = long value + // - It may be desirable to, when running in monitor mode, force monitor mode to 'quit' after X number of loops + // - This is especially beneficial when debugging or performing memory tests with Valgrind + longValues["monitor_max_loop"] = 0; + // display_sync_options = true | false + // - It may be desirable to see what options are being passed in to performSync() without enabling the full verbose debug logging + boolValues["display_sync_options"] = false; + // force_children_scan = true | false + // - Force client to use /children rather than /delta to query changes on OneDrive + // - This option flags nationalCloudDeployment as true, forcing the client to act like it is using a National Cloud Deployment model + boolValues["force_children_scan"] = false; + // display_processing_time = true | false + // - Enabling this option will add function processing times to the console output + // - This then enables 
tracking of where the application is spending most amount of time when processing data when users have questions re performance + boolValues["display_processing_time"] = false; + + // Function variables string configDirBase; string systemConfigDirBase; - if (confdirOption != "") { + bool configurationInitialised = false; + + // Initialise the application configuration, using the provided --confdir option was passed in + if (!confdirOption.empty) { // A CLI 'confdir' was passed in - // Clean up any stray " .. these should not be there ... + // Clean up any stray " .. these should not be there for correct process handling of the configuration option confdirOption = strip(confdirOption,"\""); log.vdebug("configDirName: CLI override to set configDirName to: ", confdirOption); if (canFind(confdirOption,"~")) { // A ~ was found - log.vdebug("configDirName: A '~' was found in configDirName, using the calculated 'homePath' to replace '~'"); - configDirName = homePath ~ strip(confdirOption,"~","~"); + log.vdebug("configDirName: A '~' was found in configDirName, using the calculated 'defaultHomePath' to replace '~'"); + configDirName = defaultHomePath ~ strip(confdirOption,"~","~"); } else { configDirName = confdirOption; } } else { - // Determine the base directory relative to which user specific configuration files should be stored. 
+ // Determine the base directory relative to which user specific configuration files should be stored if (environment.get("XDG_CONFIG_HOME") != ""){ log.vdebug("configDirBase: XDG_CONFIG_HOME environment variable set"); configDirBase = environment.get("XDG_CONFIG_HOME"); } else { // XDG_CONFIG_HOME does not exist on systems where X11 is not present - ie - headless systems / servers log.vdebug("configDirBase: WARNING - no XDG_CONFIG_HOME environment variable set"); - configDirBase = homePath ~ "/.config"; + configDirBase = buildNormalizedPath(buildPath(defaultHomePath, ".config")); // Also set up a path to pre-shipped shared configs (which can be overridden by supplying a config file in userspace) systemConfigDirBase = "/etc"; } - // Output configDirBase calculation log.vdebug("configDirBase: ", configDirBase); - // Set the default application configuration directory - log.vdebug("configDirName: Configuring application to use default config path"); + // Set the calculated application configuration directory + log.vdebug("configDirName: Configuring application to use calculated config path"); // configDirBase contains the correct path so we do not need to check for presence of '~' - configDirName = configDirBase ~ "/onedrive"; + configDirName = buildNormalizedPath(buildPath(configDirBase, "onedrive")); // systemConfigDirBase contains the correct path so we do not need to check for presence of '~' - systemConfigDirName = systemConfigDirBase ~ "/onedrive"; + systemConfigDirName = buildNormalizedPath(buildPath(systemConfigDirBase, "onedrive")); } - - // Config directory options all determined + + // Configuration directory should now have been correctly identified if (!exists(configDirName)) { // create the directory mkdirRecurse(configDirName); @@ -276,365 +425,242 @@ final class Config if (!isDir(configDirName)) { if (!confdirOption.empty) { // the configuration path was passed in by the user .. 
user error - writeln("ERROR: --confdir entered value is an existing file instead of an existing directory"); + log.error("ERROR: --confdir entered value is an existing file instead of an existing directory"); } else { // other error - writeln("ERROR: ~/.config/onedrive is a file rather than a directory"); + log.error("ERROR: " ~ confdirOption ~ " is a file rather than a directory"); } // Must exit exit(EXIT_FAILURE); } } - - // configDirName has a trailing / - if (!configDirName.empty) log.vlog("Using 'user' Config Dir: ", configDirName); - if (!systemConfigDirName.empty) log.vlog("Using 'system' Config Dir: ", systemConfigDirName); - + // Update application set variables based on configDirName - refreshTokenFilePath = buildNormalizedPath(configDirName ~ "/refresh_token"); - deltaLinkFilePath = buildNormalizedPath(configDirName ~ "/delta_link"); - databaseFilePath = buildNormalizedPath(configDirName ~ "/items.sqlite3"); - databaseFilePathDryRun = buildNormalizedPath(configDirName ~ "/items-dryrun.sqlite3"); - uploadStateFilePath = buildNormalizedPath(configDirName ~ "/resume_upload"); - userConfigFilePath = buildNormalizedPath(configDirName ~ "/config"); - syncListFilePath = buildNormalizedPath(configDirName ~ "/sync_list"); - systemConfigFilePath = buildNormalizedPath(systemConfigDirName ~ "/config"); - businessSharedFolderFilePath = buildNormalizedPath(configDirName ~ "/business_shared_folders"); - + // - What is the full path for the 'refresh_token' + refreshTokenFilePath = buildNormalizedPath(buildPath(configDirName, "refresh_token")); + // - What is the full path for the 'delta_link' + deltaLinkFilePath = buildNormalizedPath(buildPath(configDirName, "delta_link")); + // - What is the full path for the 'items.sqlite3' - the database cache file + databaseFilePath = buildNormalizedPath(buildPath(configDirName, "items.sqlite3")); + // - What is the full path for the 'items-dryrun.sqlite3' - the dry-run database cache file + databaseFilePathDryRun = 
buildNormalizedPath(buildPath(configDirName, "items-dryrun.sqlite3")); + // - What is the full path for the 'resume_upload' + uploadSessionFilePath = buildNormalizedPath(buildPath(configDirName, "session_upload")); + // - What is the full path for the 'sync_list' file + syncListFilePath = buildNormalizedPath(buildPath(configDirName, "sync_list")); + // - What is the full path for the 'config' - the user file to configure the application + userConfigFilePath = buildNormalizedPath(buildPath(configDirName, "config")); + // - What is the full path for the system 'config' file if it is required + systemConfigFilePath = buildNormalizedPath(buildPath(systemConfigDirName, "config")); + + // - What is the full path for the 'business_shared_items' + businessSharedItemsFilePath = buildNormalizedPath(buildPath(configDirName, "business_shared_items")); + + // To determine if any configuration items has changed, where a --resync would be required, we need to have a hash file for the following items + // - 'config.backup' file + // - applicable 'config' file + // - 'sync_list' file + // - 'business_shared_items' file + configBackupFile = buildNormalizedPath(buildPath(configDirName, ".config.backup")); + configHashFile = buildNormalizedPath(buildPath(configDirName, ".config.hash")); + syncListHashFile = buildNormalizedPath(buildPath(configDirName, ".sync_list.hash")); + businessSharedItemsHashFile = buildNormalizedPath(buildPath(configDirName, ".business_shared_items.hash")); + // Debug Output for application set variables based on configDirName - log.vdebug("refreshTokenFilePath = ", refreshTokenFilePath); - log.vdebug("deltaLinkFilePath = ", deltaLinkFilePath); - log.vdebug("databaseFilePath = ", databaseFilePath); + log.vdebug("refreshTokenFilePath = ", refreshTokenFilePath); + log.vdebug("deltaLinkFilePath = ", deltaLinkFilePath); + log.vdebug("databaseFilePath = ", databaseFilePath); log.vdebug("databaseFilePathDryRun = ", databaseFilePathDryRun); - 
log.vdebug("uploadStateFilePath = ", uploadStateFilePath); - log.vdebug("userConfigFilePath = ", userConfigFilePath); - log.vdebug("syncListFilePath = ", syncListFilePath); - log.vdebug("systemConfigFilePath = ", systemConfigFilePath); - log.vdebug("businessSharedFolderFilePath = ", businessSharedFolderFilePath); - } - - bool initialize() - { - // Initialise the application + log.vdebug("uploadSessionFilePath = ", uploadSessionFilePath); + log.vdebug("userConfigFilePath = ", userConfigFilePath); + log.vdebug("syncListFilePath = ", syncListFilePath); + log.vdebug("systemConfigFilePath = ", systemConfigFilePath); + log.vdebug("configBackupFile = ", configBackupFile); + log.vdebug("configHashFile = ", configHashFile); + log.vdebug("syncListHashFile = ", syncListHashFile); + log.vdebug("businessSharedItemsFilePath = ", businessSharedItemsFilePath); + log.vdebug("businessSharedItemsHashFile = ", businessSharedItemsHashFile); + + // Configure the Hash and Backup File Permission Value + string valueToConvert = to!string(defaultFilePermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + convertedPermissionValue = to!int(convertedValue); + + // Initialise the application using the configuration file if it exists if (!exists(userConfigFilePath)) { // 'user' configuration file does not exist // Is there a system configuration file? if (!exists(systemConfigFilePath)) { // 'system' configuration file does not exist log.vlog("No user or system config file found, using application defaults"); - return true; + applicableConfigFilePath = userConfigFilePath; + configurationInitialised = true; } else { // 'system' configuration file exists // can we load the configuration file without error? 
- if (load(systemConfigFilePath)) { + if (loadConfigFile(systemConfigFilePath)) { // configuration file loaded without error log.log("System configuration file successfully loaded"); - return true; + // Set 'applicableConfigFilePath' to equal the 'config' we loaded + applicableConfigFilePath = systemConfigFilePath; + // Update the configHashFile path value to ensure we are using the system 'config' file for the hash + configHashFile = buildNormalizedPath(buildPath(systemConfigDirName, ".config.hash")); + configurationInitialised = true; } else { // there was a problem loading the configuration file - log.log("System configuration file has errors - please check your configuration"); - return false; + log.log("\nSystem configuration file has errors - please check your configuration"); } - } + } } else { // 'user' configuration file exists // can we load the configuration file without error? - if (load(userConfigFilePath)) { + if (loadConfigFile(userConfigFilePath)) { // configuration file loaded without error log.log("Configuration file successfully loaded"); - return true; + // Set 'applicableConfigFilePath' to equal the 'config' we loaded + applicableConfigFilePath = userConfigFilePath; + configurationInitialised = true; } else { // there was a problem loading the configuration file - log.log("Configuration file has errors - please check your configuration"); - return false; + log.log("\nConfiguration file has errors - please check your configuration"); + } + } + + // Advise the user path that we will use for the application state data + if (canFind(applicableConfigFilePath, configDirName)) { + log.vlog("Using 'user' configuration path for application state data: ", configDirName); + } else { + if (canFind(applicableConfigFilePath, systemConfigDirName)) { + log.vlog("Using 'system' configuration path for application state data: ", systemConfigDirName); + } + } + + // return if the configuration was initialised + return configurationInitialised; + } + + // Create a 
backup of the 'config' file if it does not exist + void createBackupConfigFile() { + if (!getValueBool("dry_run")) { + // Is there a backup of the config file if the config file exists? + if (exists(applicableConfigFilePath)) { + log.vdebug("Creating a backup of the applicable config file"); + // create backup copy of current config file + std.file.copy(applicableConfigFilePath, configBackupFile); + // File Copy should only be readable by the user who created it - 0600 permissions needed + configBackupFile.setAttributes(convertedPermissionValue); } + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... + log.log("DRY RUN: Not creating backup config file as --dry-run has been used"); + } + } + + // Return a given string value based on the provided key + string getValueString(string key) { + auto p = key in stringValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); } } - void update_from_args(string[] args) - { - // Add additional options that are NOT configurable via config file - stringValues["create_directory"] = ""; - stringValues["create_share_link"] = ""; - stringValues["destination_directory"] = ""; - stringValues["get_file_link"] = ""; - stringValues["modified_by"] = ""; - stringValues["get_o365_drive_id"] = ""; - stringValues["remove_directory"] = ""; - stringValues["single_directory"] = ""; - stringValues["source_directory"] = ""; - stringValues["auth_files"] = ""; - stringValues["auth_response"] = ""; - boolValues["display_config"] = false; - boolValues["display_sync_status"] = false; - boolValues["print_token"] = false; - boolValues["logout"] = false; - boolValues["reauth"] = false; - boolValues["monitor"] = false; - boolValues["synchronize"] = false; - boolValues["force"] = false; - boolValues["list_business_shared_folders"] = false; - boolValues["force_sync"] = false; - boolValues["with_editing_perms"] = false; + // Return a given long value based on the 
provided key + long getValueLong(string key) { + auto p = key in longValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); + } + } - // Application Startup option validation - try { - string tmpStr; - bool tmpBol; - long tmpVerb; - // duplicated from main.d to get full help output! - auto opt = getopt( + // Return a given bool value based on the provided key + bool getValueBool(string key) { + auto p = key in boolValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); + } + } + + // Set a given string value based on the provided key + void setValueString(string key, string value) { + stringValues[key] = value; + } - args, - std.getopt.config.bundling, - std.getopt.config.caseSensitive, - "auth-files", - "Perform authentication not via interactive dialog but via files read/writes to these files.", - &stringValues["auth_files"], - "auth-response", - "Perform authentication not via interactive dialog but via providing the response url directly.", - &stringValues["auth_response"], - "check-for-nomount", - "Check for the presence of .nosync in the syncdir root. If found, do not perform sync.", - &boolValues["check_nomount"], - "check-for-nosync", - "Check for the presence of .nosync in each directory. If found, skip directory from sync.", - &boolValues["check_nosync"], - "classify-as-big-delete", - "Number of children in a path that is locally removed which will be classified as a 'big data delete'", - &longValues["classify_as_big_delete"], - "cleanup-local-files", - "Cleanup additional local files when using --download-only. 
This will remove local data.", - &boolValues["cleanup_local_files"], - "create-directory", - "Create a directory on OneDrive - no sync will be performed.", - &stringValues["create_directory"], - "create-share-link", - "Create a shareable link for an existing file on OneDrive", - &stringValues["create_share_link"], - "debug-https", - "Debug OneDrive HTTPS communication.", - &boolValues["debug_https"], - "destination-directory", - "Destination directory for renamed or move on OneDrive - no sync will be performed.", - &stringValues["destination_directory"], - "disable-notifications", - "Do not use desktop notifications in monitor mode.", - &boolValues["disable_notifications"], - "disable-download-validation", - "Disable download validation when downloading from OneDrive", - &boolValues["disable_download_validation"], - "disable-upload-validation", - "Disable upload validation when uploading to OneDrive", - &boolValues["disable_upload_validation"], - "display-config", - "Display what options the client will use as currently configured - no sync will be performed.", - &boolValues["display_config"], - "display-running-config", - "Display what options the client has been configured to use on application startup.", - &boolValues["display_running_config"], - "display-sync-status", - "Display the sync status of the client - no sync will be performed.", - &boolValues["display_sync_status"], - "download-only", - "Replicate the OneDrive online state locally, by only downloading changes from OneDrive. 
Do not upload local changes to OneDrive.", - &boolValues["download_only"], - "dry-run", - "Perform a trial sync with no changes made", - &boolValues["dry_run"], - "enable-logging", - "Enable client activity to a separate log file", - &boolValues["enable_logging"], - "force-http-11", - "Force the use of HTTP 1.1 for all operations", - &boolValues["force_http_11"], - "force", - "Force the deletion of data when a 'big delete' is detected", - &boolValues["force"], - "force-sync", - "Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignore all non-default skip_dir and skip_file rules", - &boolValues["force_sync"], - "get-file-link", - "Display the file link of a synced file", - &stringValues["get_file_link"], - "get-O365-drive-id", - "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library", - &stringValues["get_o365_drive_id"], - "local-first", - "Synchronize from the local directory source first, before downloading changes from OneDrive.", - &boolValues["local_first"], - "log-dir", - "Directory where logging output is saved to, needs to end with a slash.", - &stringValues["log_dir"], - "logout", - "Logout the current user", - &boolValues["logout"], - "min-notify-changes", - "Minimum number of pending incoming changes necessary to trigger a desktop notification", - &longValues["min_notify_changes"], - "modified-by", - "Display the last modified by details of a given path", - &stringValues["modified_by"], - "monitor|m", - "Keep monitoring for local and remote changes", - &boolValues["monitor"], - "monitor-interval", - "Number of seconds by which each sync operation is undertaken when idle under monitor mode.", - &longValues["monitor_interval"], - "monitor-fullscan-frequency", - "Number of sync runs before performing a full local scan of the synced directory", - &longValues["monitor_fullscan_frequency"], - "monitor-log-frequency", - "Frequency of logging in monitor mode", - 
&longValues["monitor_log_frequency"], - "no-remote-delete", - "Do not delete local file 'deletes' from OneDrive when using --upload-only", - &boolValues["no_remote_delete"], - "print-token", - "Print the access token, useful for debugging", - &boolValues["print_token"], - "reauth", - "Reauthenticate the client with OneDrive", - &boolValues["reauth"], - "resync", - "Forget the last saved state, perform a full sync", - &boolValues["resync"], - "resync-auth", - "Approve the use of performing a --resync action", - &boolValues["resync_auth"], - "remove-directory", - "Remove a directory on OneDrive - no sync will be performed.", - &stringValues["remove_directory"], - "remove-source-files", - "Remove source file after successful transfer to OneDrive when using --upload-only", - &boolValues["remove_source_files"], - "single-directory", - "Specify a single local directory within the OneDrive root to sync.", - &stringValues["single_directory"], - "skip-dot-files", - "Skip dot files and folders from syncing", - &boolValues["skip_dotfiles"], - "skip-file", - "Skip any files that match this pattern from syncing", - &stringValues["skip_file"], - "skip-dir", - "Skip any directories that match this pattern from syncing", - &stringValues["skip_dir"], - "skip-size", - "Skip new files larger than this size (in MB)", - &longValues["skip_size"], - "skip-dir-strict-match", - "When matching skip_dir directories, only match explicit matches", - &boolValues["skip_dir_strict_match"], - "skip-symlinks", - "Skip syncing of symlinks", - &boolValues["skip_symlinks"], - "source-directory", - "Source directory to rename or move on OneDrive - no sync will be performed.", - &stringValues["source_directory"], - "space-reservation", - "The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation", - &longValues["space_reservation"], - "syncdir", - "Specify the local directory used for synchronization to OneDrive", - &stringValues["sync_dir"], - "synchronize", - "Perform a 
synchronization", - &boolValues["synchronize"], - "sync-root-files", - "Sync all files in sync_dir root when using sync_list.", - &boolValues["sync_root_files"], - "upload-only", - "Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive.", - &boolValues["upload_only"], - "user-agent", - "Specify a User Agent string to the http client", - &stringValues["user_agent"], - "confdir", - "Set the directory used to store the configuration files", - &tmpStr, - "verbose|v+", - "Print more details, useful for debugging (repeat for extra debugging)", - &tmpVerb, - "version", - "Print the version and exit", - &tmpBol, - "list-shared-folders", - "List OneDrive Business Shared Folders", - &boolValues["list_business_shared_folders"], - "sync-shared-folders", - "Sync OneDrive Business Shared Folders", - &boolValues["sync_business_shared_folders"], - "with-editing-perms", - "Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link ", - &boolValues["with_editing_perms"] - ); - if (opt.helpWanted) { - outputLongHelp(opt.options); - exit(EXIT_SUCCESS); - } - } catch (GetOptException e) { - log.error(e.msg); - log.error("Try 'onedrive -h' for more information"); - exit(EXIT_FAILURE); - } catch (Exception e) { - // error - log.error(e.msg); - log.error("Try 'onedrive -h' for more information"); - exit(EXIT_FAILURE); - } + // Set a given long value based on the provided key + void setValueLong(string key, long value) { + longValues[key] = value; } - string getValueString(string key) - { - auto p = key in stringValues; - if (p) { - return *p; - } else { - throw new Exception("Missing config value: " ~ key); - } + // Set a given long value based on the provided key + void setValueBool(string key, bool value) { + boolValues[key] = value; } - - long getValueLong(string key) - { - auto p = key in longValues; - if (p) { - return *p; + + // Configure the 
directory octal permission value + void configureRequiredDirectoryPermisions() { + // return the directory permission mode required + // - return octal!defaultDirectoryPermissionMode; ... cant be used .. which is odd + // Error: variable defaultDirectoryPermissionMode cannot be read at compile time + if (getValueLong("sync_dir_permissions") != defaultDirectoryPermissionMode) { + // return user configured permissions as octal integer + string valueToConvert = to!string(getValueLong("sync_dir_permissions")); + auto convertedValue = parse!long(valueToConvert, 8); + configuredDirectoryPermissionMode = to!int(convertedValue); } else { - throw new Exception("Missing config value: " ~ key); + // return default as octal integer + string valueToConvert = to!string(defaultDirectoryPermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + configuredDirectoryPermissionMode = to!int(convertedValue); } } - bool getValueBool(string key) - { - auto p = key in boolValues; - if (p) { - return *p; + // Configure the file octal permission value + void configureRequiredFilePermisions() { + // return the file permission mode required + // - return octal!defaultFilePermissionMode; ... cant be used .. 
which is odd + // Error: variable defaultFilePermissionMode cannot be read at compile time + if (getValueLong("sync_file_permissions") != defaultFilePermissionMode) { + // return user configured permissions as octal integer + string valueToConvert = to!string(getValueLong("sync_file_permissions")); + auto convertedValue = parse!long(valueToConvert, 8); + configuredFilePermissionMode = to!int(convertedValue); } else { - throw new Exception("Missing config value: " ~ key); + // return default as octal integer + string valueToConvert = to!string(defaultFilePermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + configuredFilePermissionMode = to!int(convertedValue); } } - void setValueBool(string key, bool value) - { - boolValues[key] = value; - } - - void setValueString(string key, string value) - { - stringValues[key] = value; + // Read the configuredDirectoryPermissionMode and return + int returnRequiredDirectoryPermisions() { + if (configuredDirectoryPermissionMode == 0) { + // the configured value is zero, this means that directories would get + // values of d--------- + configureRequiredDirectoryPermisions(); + } + return configuredDirectoryPermissionMode; } - void setValueLong(string key, long value) - { - longValues[key] = value; + // Read the configuredFilePermissionMode and return + int returnRequiredFilePermisions() { + if (configuredFilePermissionMode == 0) { + // the configured value is zero + configureRequiredFilePermisions(); + } + return configuredFilePermissionMode; } - - // load a configuration file - private bool load(string filename) - { + + // Load a configuration file from the provided filename + private bool loadConfigFile(string filename) { // configure function variables try { + log.log("Reading configuration file: ", filename); readText(filename); } catch (std.file.FileException e) { // Unable to access required file @@ -679,6 +705,22 @@ final class Config c.popFront(); // only accept "true" as true value. 
TODO Should we support other formats? setValueBool(key, c.front.dup == "true" ? true : false); + + // skip_dotfiles tracking for change + if (key == "skip_dotfiles") { + configFileSkipDotfiles = true; + } + + // skip_symlinks tracking for change + if (key == "skip_symlinks") { + configFileSkipSymbolicLinks = true; + } + + // sync_business_shared_items tracking for change + if (key == "sync_business_shared_items") { + configFileSyncBusinessSharedItems = true; + } + } else { auto pp = key in stringValues; if (pp) { @@ -688,7 +730,23 @@ final class Config // --syncdir ARG // --skip-file ARG // --skip-dir ARG - if (key == "sync_dir") configFileSyncDir = c.front.dup; + + // sync_dir + if (key == "sync_dir") { + // configure a temp variable + string tempSyncDirValue = c.front.dup; + // is this empty ? + if (!strip(tempSyncDirValue).empty) { + configFileSyncDir = tempSyncDirValue; + } else { + // sync_dir cannot be empty + log.error("Invalid value for key in config file: ", key); + log.error("sync_dir in config file cannot be empty - this is a fatal error and must be corrected"); + exit(EXIT_FAILURE); + } + } + + // skip_file if (key == "skip_file") { // Handle multiple entries of skip_file if (configFileSkipFile.empty) { @@ -700,6 +758,8 @@ final class Config setValueString("skip_file", configFileSkipFile); } } + + // skip_dir if (key == "skip_dir") { // Handle multiple entries of skip_dir if (configFileSkipDir.empty) { @@ -711,6 +771,7 @@ final class Config setValueString("skip_dir", configFileSkipDir); } } + // --single-directory Strip quotation marks from path // This is an issue when using ONEDRIVE_SINGLE_DIRECTORY with Docker if (key == "single_directory") { @@ -718,9 +779,10 @@ final class Config string configSingleDirectory = strip(to!string(c.front.dup), "\""); setValueString("single_directory", configSingleDirectory); } + // Azure AD Configuration if (key == "azure_ad_endpoint") { - string azureConfigValue = c.front.dup; + string azureConfigValue = 
strip(c.front.dup); switch(azureConfigValue) { case "": log.log("Using config option for Global Azure AD Endpoints"); @@ -742,24 +804,143 @@ final class Config log.log("Unknown Azure AD Endpoint - using Global Azure AD Endpoints"); } } + + // Application ID + if (key == "application_id") { + // This key cannot be empty + string tempApplicationId = strip(c.front.dup); + if (tempApplicationId.empty) { + log.log("Invalid value for key in config file - using default value: ", key); + log.vdebug("application_id in config file cannot be empty - using default application_id"); + setValueString("application_id", defaultApplicationId); + } else { + setValueString("application_id", tempApplicationId); + } + } + + // Drive ID + if (key == "drive_id") { + // This key cannot be empty + string tempApplicationId = strip(c.front.dup); + if (tempApplicationId.empty) { + log.error("Invalid value for key in config file: ", key); + log.error("drive_id in config file cannot be empty - this is a fatal error and must be corrected"); + exit(EXIT_FAILURE); + } else { + setValueString("drive_id", tempApplicationId); + configFileDriveId = tempApplicationId; + } + } + + // Log Directory + if (key == "log_dir") { + // This key cannot be empty + string tempLogDir = strip(c.front.dup); + if (tempLogDir.empty) { + log.log("Invalid value for key in config file - using default value: ", key); + log.vdebug("log_dir in config file cannot be empty - using default log_dir"); + setValueString("log_dir", defaultLogFileDir); + } else { + setValueString("log_dir", tempLogDir); + } + } + } else { auto ppp = key in longValues; if (ppp) { c.popFront(); - setValueLong(key, to!long(c.front.dup)); - // if key is space_reservation we have to calculate MB -> bytes + ulong thisConfigValue; + + // Can this value actually be converted to an integer? 
+ try { + thisConfigValue = to!long(c.front.dup); + } catch (std.conv.ConvException) { + log.log("Invalid value for key in config file: ", key); + return false; + } + + setValueLong(key, thisConfigValue); + + // if key is 'monitor_interval' the value must be 300 or greater + if (key == "monitor_interval") { + // temp value + ulong tempValue = thisConfigValue; + // the temp value needs to be greater than 300 + if (tempValue < 300) { + log.log("Invalid value for key in config file - using default value: ", key); + tempValue = 300; + } + setValueLong("monitor_interval", to!long(tempValue)); + } + + // if key is 'monitor_fullscan_frequency' the value must be 12 or greater + if (key == "monitor_fullscan_frequency") { + // temp value + ulong tempValue = thisConfigValue; + // the temp value needs to be greater than 12 + if (tempValue < 12) { + // If this is not set to zero (0) then we are not disabling 'monitor_fullscan_frequency' + if (tempValue != 0) { + // invalid value + log.log("Invalid value for key in config file - using default value: ", key); + tempValue = 12; + } + } + setValueLong("monitor_fullscan_frequency", to!long(tempValue)); + } + + // if key is 'space_reservation' we have to calculate MB -> bytes if (key == "space_reservation") { // temp value - ulong tempValue = to!long(c.front.dup); + ulong tempValue = thisConfigValue; // a value of 0 needs to be made at least 1MB .. if (tempValue == 0) { + log.log("Invalid value for key in config file - using 1MB: ", key); tempValue = 1; } setValueLong("space_reservation", to!long(tempValue * 2^^20)); } + + // if key is 'ip_protocol_version' this has to be a value of 0 or 1 or 2 .. 
nothing else + if (key == "ip_protocol_version") { + // temp value + ulong tempValue = thisConfigValue; + // If greater than 2, set to default + if (tempValue > 2) { + log.log("Invalid value for key in config file - using default value: ", key); + // Set to default of 0 + tempValue = 0; + } + setValueLong("ip_protocol_version", to!long(tempValue)); + } + } else { log.log("Unknown key in config file: ", key); - return false; + + bool ignore_depreciation = false; + + // min_notify_changes has been depreciated + if (key == "min_notify_changes") { + log.log("\nThe option 'min_notify_changes' has been depreciated and will be ignored. Please read the updated documentation and update your client configuration."); + writeln(); + ignore_depreciation = true; + } + + // force_http_2 has been depreciated + if (key == "force_http_2") { + log.log("\nThe option 'force_http_2' has been depreciated and will be ignored. Please read the updated documentation and update your client configuration."); + writeln(); + ignore_depreciation = true; + } + + // Application configuration update required for Business Shared Folders + if (key == "sync_business_shared_folders") { + log.log("\nThe process for synchronising Microsoft OneDrive Business Shared Folders has changed."); + log.log("Please review the revised documentation on how to configure this application feature. 
You must update your client configuration and make any necessary online adjustments accordingly."); + writeln(); + } + // Return false + return ignore_depreciation; } } } @@ -768,134 +949,1289 @@ final class Config return false; } } + + // Close the file access + file.close(); + // Free object and memory + object.destroy(file); + object.destroy(range); return true; } + + // Update the application configuration based on CLI passed in parameters + void updateFromArgs(string[] cliArgs) { + // Add additional options that are NOT configurable via config file + stringValues["create_directory"] = ""; + stringValues["create_share_link"] = ""; + stringValues["destination_directory"] = ""; + stringValues["get_file_link"] = ""; + stringValues["modified_by"] = ""; + stringValues["sharepoint_library_name"] = ""; + stringValues["remove_directory"] = ""; + stringValues["single_directory"] = ""; + stringValues["source_directory"] = ""; + stringValues["auth_files"] = ""; + stringValues["auth_response"] = ""; + boolValues["display_config"] = false; + boolValues["display_sync_status"] = false; + boolValues["print_token"] = false; + boolValues["logout"] = false; + boolValues["reauth"] = false; + boolValues["monitor"] = false; + boolValues["synchronize"] = false; + boolValues["force"] = false; + boolValues["list_business_shared_items"] = false; + boolValues["force_sync"] = false; + boolValues["with_editing_perms"] = false; - void configureRequiredDirectoryPermisions() { - // return the directory permission mode required - // - return octal!defaultDirectoryPermissionMode; ... cant be used .. 
which is odd - // Error: variable defaultDirectoryPermissionMode cannot be read at compile time - if (getValueLong("sync_dir_permissions") != defaultDirectoryPermissionMode) { - // return user configured permissions as octal integer - string valueToConvert = to!string(getValueLong("sync_dir_permissions")); - auto convertedValue = parse!long(valueToConvert, 8); - configuredDirectoryPermissionMode = to!int(convertedValue); - } else { - // return default as octal integer - string valueToConvert = to!string(defaultDirectoryPermissionMode); - auto convertedValue = parse!long(valueToConvert, 8); - configuredDirectoryPermissionMode = to!int(convertedValue); - } - } - - void configureRequiredFilePermisions() { - // return the file permission mode required - // - return octal!defaultFilePermissionMode; ... cant be used .. which is odd - // Error: variable defaultFilePermissionMode cannot be read at compile time - if (getValueLong("sync_file_permissions") != defaultFilePermissionMode) { - // return user configured permissions as octal integer - string valueToConvert = to!string(getValueLong("sync_file_permissions")); - auto convertedValue = parse!long(valueToConvert, 8); - configuredFilePermissionMode = to!int(convertedValue); - } else { - // return default as octal integer - string valueToConvert = to!string(defaultFilePermissionMode); - auto convertedValue = parse!long(valueToConvert, 8); - configuredFilePermissionMode = to!int(convertedValue); + // Application Startup option validation + try { + string tmpStr; + bool tmpBol; + long tmpVerb; + // duplicated from main.d to get full help output! 
+ auto opt = getopt( + + cliArgs, + std.getopt.config.bundling, + std.getopt.config.caseSensitive, + "auth-files", + "Perform authentication not via interactive dialog but via files read/writes to these files.", + &stringValues["auth_files"], + "auth-response", + "Perform authentication not via interactive dialog but via providing the response url directly.", + &stringValues["auth_response"], + "check-for-nomount", + "Check for the presence of .nosync in the syncdir root. If found, do not perform sync.", + &boolValues["check_nomount"], + "check-for-nosync", + "Check for the presence of .nosync in each directory. If found, skip directory from sync.", + &boolValues["check_nosync"], + "classify-as-big-delete", + "Number of children in a path that is locally removed which will be classified as a 'big data delete'", + &longValues["classify_as_big_delete"], + "cleanup-local-files", + "Cleanup additional local files when using --download-only. This will remove local data.", + &boolValues["cleanup_local_files"], + "create-directory", + "Create a directory on OneDrive - no sync will be performed.", + &stringValues["create_directory"], + "create-share-link", + "Create a shareable link for an existing file on OneDrive", + &stringValues["create_share_link"], + "debug-https", + "Debug OneDrive HTTPS communication.", + &boolValues["debug_https"], + "destination-directory", + "Destination directory for renamed or move on OneDrive - no sync will be performed.", + &stringValues["destination_directory"], + "disable-notifications", + "Do not use desktop notifications in monitor mode.", + &boolValues["disable_notifications"], + "disable-download-validation", + "Disable download validation when downloading from OneDrive", + &boolValues["disable_download_validation"], + "disable-upload-validation", + "Disable upload validation when uploading to OneDrive", + &boolValues["disable_upload_validation"], + "display-config", + "Display what options the client will use as currently configured - 
no sync will be performed.", + &boolValues["display_config"], + "display-running-config", + "Display what options the client has been configured to use on application startup.", + &boolValues["display_running_config"], + "display-sync-status", + "Display the sync status of the client - no sync will be performed.", + &boolValues["display_sync_status"], + "download-only", + "Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive.", + &boolValues["download_only"], + "dry-run", + "Perform a trial sync with no changes made", + &boolValues["dry_run"], + "enable-logging", + "Enable client activity to a separate log file", + &boolValues["enable_logging"], + "force-http-11", + "Force the use of HTTP 1.1 for all operations", + &boolValues["force_http_11"], + "force", + "Force the deletion of data when a 'big delete' is detected", + &boolValues["force"], + "force-sync", + "Force a synchronization of a specific folder, only when using --sync --single-directory and ignore all non-default skip_dir and skip_file rules", + &boolValues["force_sync"], + "get-file-link", + "Display the file link of a synced file", + &stringValues["get_file_link"], + "get-sharepoint-drive-id", + "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library", + &stringValues["sharepoint_library_name"], + "get-O365-drive-id", + "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library (DEPRECIATED)", + &stringValues["sharepoint_library_name"], + "local-first", + "Synchronize from the local directory source first, before downloading changes from OneDrive.", + &boolValues["local_first"], + "log-dir", + "Directory where logging output is saved to, needs to end with a slash.", + &stringValues["log_dir"], + "logout", + "Logout the current user", + &boolValues["logout"], + "modified-by", + "Display the last modified by details of a given path", + &stringValues["modified_by"], 
+ "monitor|m", + "Keep monitoring for local and remote changes", + &boolValues["monitor"], + "monitor-interval", + "Number of seconds by which each sync operation is undertaken when idle under monitor mode.", + &longValues["monitor_interval"], + "monitor-fullscan-frequency", + "Number of sync runs before performing a full local scan of the synced directory", + &longValues["monitor_fullscan_frequency"], + "monitor-log-frequency", + "Frequency of logging in monitor mode", + &longValues["monitor_log_frequency"], + "no-remote-delete", + "Do not delete local file 'deletes' from OneDrive when using --upload-only", + &boolValues["no_remote_delete"], + "print-access-token", + "Print the access token, useful for debugging", + &boolValues["print_token"], + "reauth", + "Reauthenticate the client with OneDrive", + &boolValues["reauth"], + "resync", + "Forget the last saved state, perform a full sync", + &boolValues["resync"], + "resync-auth", + "Approve the use of performing a --resync action", + &boolValues["resync_auth"], + "remove-directory", + "Remove a directory on OneDrive - no sync will be performed.", + &stringValues["remove_directory"], + "remove-source-files", + "Remove source file after successful transfer to OneDrive when using --upload-only", + &boolValues["remove_source_files"], + "single-directory", + "Specify a single local directory within the OneDrive root to sync.", + &stringValues["single_directory"], + "skip-dot-files", + "Skip dot files and folders from syncing", + &boolValues["skip_dotfiles"], + "skip-file", + "Skip any files that match this pattern from syncing", + &stringValues["skip_file"], + "skip-dir", + "Skip any directories that match this pattern from syncing", + &stringValues["skip_dir"], + "skip-size", + "Skip new files larger than this size (in MB)", + &longValues["skip_size"], + "skip-dir-strict-match", + "When matching skip_dir directories, only match explicit matches", + &boolValues["skip_dir_strict_match"], + "skip-symlinks", + "Skip 
syncing of symlinks", + &boolValues["skip_symlinks"], + "source-directory", + "Source directory to rename or move on OneDrive - no sync will be performed.", + &stringValues["source_directory"], + "space-reservation", + "The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation", + &longValues["space_reservation"], + "syncdir", + "Specify the local directory used for synchronization to OneDrive", + &stringValues["sync_dir"], + "sync|s", + "Perform a synchronisation with Microsoft OneDrive", + &boolValues["synchronize"], + "synchronize", + "Perform a synchronisation with Microsoft OneDrive (DEPRECIATED)", + &boolValues["synchronize"], + "sync-root-files", + "Sync all files in sync_dir root when using sync_list.", + &boolValues["sync_root_files"], + "upload-only", + "Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive.", + &boolValues["upload_only"], + "confdir", + "Set the directory used to store the configuration files", + &tmpStr, + "verbose|v+", + "Print more details, useful for debugging (repeat for extra debugging)", + &tmpVerb, + "version", + "Print the version and exit", + &tmpBol, + "with-editing-perms", + "Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link ", + &boolValues["with_editing_perms"] + ); + + // Was --auth-files used? 
+ if (!getValueString("auth_files").empty) { + // --auth-files used, need to validate that '~' was not used as a path identifier, and if yes, perform the correct expansion + string[] tempAuthFiles = getValueString("auth_files").split(":"); + string tempAuthUrl = tempAuthFiles[0]; + string tempResponseUrl = tempAuthFiles[1]; + string newAuthFilesString; + + // shell expansion if required + if (!shellEnvironmentSet){ + // No shell environment is set, no automatic expansion of '~' if present is possible + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempAuthUrl, "~")) { + // A ~ was found in auth_files(authURL) + log.vdebug("auth_files: A '~' was found in 'auth_files(authURL)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); + tempAuthUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempAuthUrl, "~"))); + } + + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempResponseUrl, "~")) { + // A ~ was found in auth_files(authURL) + log.vdebug("auth_files: A '~' was found in 'auth_files(tempResponseUrl)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); + tempResponseUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempResponseUrl, "~"))); + } + } else { + // Shell environment is set, automatic expansion of '~' if present is possible + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempAuthUrl, "~")) { + // A ~ was found in auth_files(authURL) + log.vdebug("auth_files: A '~' was found in the configured 'auth_files(authURL)', automatically expanding as SHELL and USER environment variable is set"); + tempAuthUrl = expandTilde(tempAuthUrl); + } + + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempResponseUrl, "~")) { + // A ~ was found in auth_files(authURL) + log.vdebug("auth_files: A '~' was found in the configured 'auth_files(tempResponseUrl)', automatically 
expanding as SHELL and USER environment variable is set"); + tempResponseUrl = expandTilde(tempResponseUrl); + } + } + + // Build new string + newAuthFilesString = tempAuthUrl ~ ":" ~ tempResponseUrl; + log.vdebug("auth_files - updated value: ", newAuthFilesString); + setValueString("auth_files", newAuthFilesString); + } + + if (opt.helpWanted) { + outputLongHelp(opt.options); + exit(EXIT_SUCCESS); + } + } catch (GetOptException e) { + log.error(e.msg); + log.error("Try 'onedrive -h' for more information"); + exit(EXIT_FAILURE); + } catch (Exception e) { + // error + log.error(e.msg); + log.error("Try 'onedrive -h' for more information"); + exit(EXIT_FAILURE); + } + } + + // Check the arguments passed in for any that will be depreciated + void checkDepreciatedOptions(string[] cliArgs) { + + bool depreciatedCommandsFound = false; + + foreach (cliArg; cliArgs) { + // Check each CLI arg for items that have been depreciated + + // --synchronize depreciated in v2.5.0, will be removed in future version + if (cliArg == "--synchronize") { + writeln(); + log.error("DEPRECIATION WARNING: --synchronize has been depreciated in favour of --sync or -s"); + depreciatedCommandsFound = true; + } + + // --get-O365-drive-id depreciated in v2.5.0, will be removed in future version + if (cliArg == "--get-O365-drive-id") { + writeln(); + log.error("DEPRECIATION WARNING: --get-O365-drive-id has been depreciated in favour of --get-sharepoint-drive-id"); + depreciatedCommandsFound = true; + } + } + + if (depreciatedCommandsFound) { + log.error("DEPRECIATION WARNING: Depreciated commands will be removed in a future release."); + writeln(); } } + + // Display the applicable application configuration + void displayApplicationConfiguration() { + if (getValueBool("display_running_config")) { + writeln("--------------- Application Runtime Configuration ---------------"); + } + + // Display application version + //writeln("onedrive version = ", strip(import("version"))); + + string tempVersion = 
"v2.5.0-alpha-2" ~ " GitHub version: " ~ strip(import("version")); + writeln("onedrive version = ", tempVersion); + + // Display all of the pertinent configuration options + writeln("Config path = ", configDirName); + // Does a config file exist or are we using application defaults + writeln("Config file found in config path = ", exists(applicableConfigFilePath)); + + // Is config option drive_id configured? + writeln("Config option 'drive_id' = ", getValueString("drive_id")); + + // Config Options as per 'config' file + writeln("Config option 'sync_dir' = ", getValueString("sync_dir")); + + // logging and notifications + writeln("Config option 'enable_logging' = ", getValueBool("enable_logging")); + writeln("Config option 'log_dir' = ", getValueString("log_dir")); + writeln("Config option 'disable_notifications' = ", getValueBool("disable_notifications")); + + // skip files and directory and 'matching' policy + writeln("Config option 'skip_dir' = ", getValueString("skip_dir")); + writeln("Config option 'skip_dir_strict_match' = ", getValueBool("skip_dir_strict_match")); + writeln("Config option 'skip_file' = ", getValueString("skip_file")); + writeln("Config option 'skip_dotfiles' = ", getValueBool("skip_dotfiles")); + writeln("Config option 'skip_symlinks' = ", getValueBool("skip_symlinks")); + + // --monitor sync process options + writeln("Config option 'monitor_interval' = ", getValueLong("monitor_interval")); + writeln("Config option 'monitor_log_frequency' = ", getValueLong("monitor_log_frequency")); + writeln("Config option 'monitor_fullscan_frequency' = ", getValueLong("monitor_fullscan_frequency")); + + // sync process and method + writeln("Config option 'read_only_auth_scope' = ", getValueBool("read_only_auth_scope")); + writeln("Config option 'dry_run' = ", getValueBool("dry_run")); + writeln("Config option 'upload_only' = ", getValueBool("upload_only")); + writeln("Config option 'download_only' = ", getValueBool("download_only")); + writeln("Config 
option 'local_first' = ", getValueBool("local_first")); + writeln("Config option 'check_nosync' = ", getValueBool("check_nosync")); + writeln("Config option 'check_nomount' = ", getValueBool("check_nomount")); + writeln("Config option 'resync' = ", getValueBool("resync")); + writeln("Config option 'resync_auth' = ", getValueBool("resync_auth")); + writeln("Config option 'cleanup_local_files' = ", getValueBool("cleanup_local_files")); - int returnRequiredDirectoryPermisions() { - // read the configuredDirectoryPermissionMode and return - if (configuredDirectoryPermissionMode == 0) { - // the configured value is zero, this means that directories would get - // values of d--------- - configureRequiredDirectoryPermisions(); + // data integrity + writeln("Config option 'classify_as_big_delete' = ", getValueLong("classify_as_big_delete")); + writeln("Config option 'disable_upload_validation' = ", getValueBool("disable_upload_validation")); + writeln("Config option 'disable_download_validation' = ", getValueBool("disable_download_validation")); + writeln("Config option 'bypass_data_preservation' = ", getValueBool("bypass_data_preservation")); + writeln("Config option 'no_remote_delete' = ", getValueBool("no_remote_delete")); + writeln("Config option 'remove_source_files' = ", getValueBool("remove_source_files")); + writeln("Config option 'sync_dir_permissions' = ", getValueLong("sync_dir_permissions")); + writeln("Config option 'sync_file_permissions' = ", getValueLong("sync_file_permissions")); + writeln("Config option 'space_reservation' = ", getValueLong("space_reservation")); + + // curl operations + writeln("Config option 'application_id' = ", getValueString("application_id")); + writeln("Config option 'azure_ad_endpoint' = ", getValueString("azure_ad_endpoint")); + writeln("Config option 'azure_tenant_id' = ", getValueString("azure_tenant_id")); + writeln("Config option 'user_agent' = ", getValueString("user_agent")); + writeln("Config option 'force_http_11' = ", 
getValueBool("force_http_11")); + writeln("Config option 'debug_https' = ", getValueBool("debug_https")); + writeln("Config option 'rate_limit' = ", getValueLong("rate_limit")); + writeln("Config option 'operation_timeout' = ", getValueLong("operation_timeout")); + writeln("Config option 'dns_timeout' = ", getValueLong("dns_timeout")); + writeln("Config option 'connect_timeout' = ", getValueLong("connect_timeout")); + writeln("Config option 'data_timeout' = ", getValueLong("data_timeout")); + writeln("Config option 'ip_protocol_version' = ", getValueLong("ip_protocol_version")); + + // Is sync_list configured ? + writeln("\nConfig option 'sync_root_files' = ", getValueBool("sync_root_files")); + if (exists(syncListFilePath)){ + + writeln("Selective sync 'sync_list' configured = true"); + writeln("sync_list contents:"); + // Output the sync_list contents + auto syncListFile = File(syncListFilePath, "r"); + auto range = syncListFile.byLine(); + foreach (line; range) + { + writeln(line); + } + } else { + writeln("Selective sync 'sync_list' configured = false"); + + } + + // Is sync_business_shared_items enabled and configured ? + writeln("\nConfig option 'sync_business_shared_items' = ", getValueBool("sync_business_shared_items")); + + if (exists(businessSharedItemsFilePath)){ + writeln("Selective Business Shared Items configured = true"); + writeln("sync_business_shared_items contents:"); + // Output the sync_business_shared_items contents + auto businessSharedItemsFileList = File(businessSharedItemsFilePath, "r"); + auto range = businessSharedItemsFileList.byLine(); + foreach (line; range) + { + writeln(line); + } + } else { + writeln("Selective Business Shared Items configured = false"); + } + + // Are webhooks enabled? 
+ writeln("\nConfig option 'webhook_enabled' = ", getValueBool("webhook_enabled")); + if (getValueBool("webhook_enabled")) { + writeln("Config option 'webhook_public_url' = ", getValueString("webhook_public_url")); + writeln("Config option 'webhook_listening_host' = ", getValueString("webhook_listening_host")); + writeln("Config option 'webhook_listening_port' = ", getValueLong("webhook_listening_port")); + writeln("Config option 'webhook_expiration_interval' = ", getValueLong("webhook_expiration_interval")); + writeln("Config option 'webhook_renewal_interval' = ", getValueLong("webhook_renewal_interval")); + } + + if (getValueBool("display_running_config")) { + writeln("-----------------------------------------------------------------"); } - return configuredDirectoryPermissionMode; } + + // Prompt the user to accept the risk of using --resync + bool displayResyncRiskForAcceptance() { + // what is the user risk acceptance? + bool userRiskAcceptance = false; + + // Did the user use --resync-auth or 'resync_auth' in the config file to negate presenting this message? + if (!getValueBool("resync_auth")) { + // need to prompt user + char response; + + // --resync warning message + writeln("\nThe usage of --resync will delete your local 'onedrive' client state, thus no record of your current 'sync status' will exist."); + writeln("This has the potential to overwrite local versions of files with perhaps older versions of documents downloaded from OneDrive, resulting in local data loss."); + writeln("If in doubt, backup your local data before using --resync"); + write("\nAre you sure you wish to proceed with --resync? [Y/N] "); + + try { + // Attempt to read user response + string input = readln().strip; + if (input.length > 0) { + response = std.ascii.toUpper(input[0]); + } + } catch (std.format.FormatException e) { + userRiskAcceptance = false; + // Caught an error + return EXIT_FAILURE; + } + + // What did the user enter? 
+ log.vdebug("--resync warning User Response Entered: ", (to!string(response))); + + // Evaluate user repsonse + if ((to!string(response) == "y") || (to!string(response) == "Y")) { + // User has accepted --resync risk to proceed + userRiskAcceptance = true; + // Are you sure you wish .. does not use writeln(); + write("\n"); + } + } else { + // resync_auth is true + userRiskAcceptance = true; + } + + // Return the --resync acceptance or not + return userRiskAcceptance; + } + + // Prompt the user to accept the risk of using --force-sync + bool displayForceSyncRiskForAcceptance() { + // what is the user risk acceptance? + bool userRiskAcceptance = false; + + // need to prompt user + char response; + + // --force-sync warning message + writeln("\nThe use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts."); + writeln("By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync."); + write("\nAre you sure you wish to proceed with --force-sync [Y/N] "); + + try { + // Attempt to read user response + string input = readln().strip; + if (input.length > 0) { + response = std.ascii.toUpper(input[0]); + } + } catch (std.format.FormatException e) { + userRiskAcceptance = false; + // Caught an error + return EXIT_FAILURE; + } + + // What did the user enter? + log.vdebug("--force-sync warning User Response Entered: ", (to!string(response))); + + // Evaluate user repsonse + if ((to!string(response) == "y") || (to!string(response) == "Y")) { + // User has accepted --force-sync risk to proceed + userRiskAcceptance = true; + // Are you sure you wish .. 
does not use writeln(); + write("\n"); + } + + // Return the --resync acceptance or not + return userRiskAcceptance; + } + + // Check the application configuration for any changes that need to trigger a --resync + // This function is only called if --resync is not present + bool applicationChangeWhereResyncRequired() { + // Default is that no resync is required + bool resyncRequired = false; + + // Configuration File Flags + bool configFileOptionsDifferent = false; + bool syncListFileDifferent = false; + bool syncDirDifferent = false; + bool skipFileDifferent = false; + bool skipDirDifferent = false; + bool skipDotFilesDifferent = false; + bool skipSymbolicLinksDifferent = false; + bool driveIdDifferent = false; + bool syncBusinessSharedItemsDifferent = false; + bool businessSharedItemsFileDifferent = false; + + // Create the required initial hash files + createRequiredInitialConfigurationHashFiles(); + + // Read in the existing hash file values + readExistingConfigurationHashFiles(); + + // Was the 'sync_list' file updated? + if (currentSyncListHash != previousSyncListHash) { + // Debugging output to assist what changed + log.vdebug("sync_list file has been updated, --resync needed"); + syncListFileDifferent = true; + } + + // Was the 'business_shared_items' file updated? + if (currentBusinessSharedItemsHash != previousBusinessSharedItemsHash) { + // Debugging output to assist what changed + log.vdebug("business_shared_folders file has been updated, --resync needed"); + businessSharedItemsFileDifferent = true; + } + + // Was the 'config' file updated between last execution and this execution? 
+ if (currentConfigHash != previousConfigHash) { + // config file was updated, however we only want to trigger a --resync requirement if sync_dir, skip_dir, skip_file or drive_id was modified + log.log("Application configuration file has been updated, checking if --resync needed"); + log.vdebug("Using this configBackupFile: ", configBackupFile); + + if (exists(configBackupFile)) { + // check backup config what has changed for these configuration options if anything + // # drive_id = "" + // # sync_dir = "~/OneDrive" + // # skip_file = "~*|.~*|*.tmp|*.swp|*.partial" + // # skip_dir = "" + // # skip_dotfiles = "" + // # skip_symlinks = "" + // # sync_business_shared_items = "" + string[string] backupConfigStringValues; + backupConfigStringValues["drive_id"] = ""; + backupConfigStringValues["sync_dir"] = ""; + backupConfigStringValues["skip_file"] = ""; + backupConfigStringValues["skip_dir"] = ""; + backupConfigStringValues["skip_dotfiles"] = ""; + backupConfigStringValues["skip_symlinks"] = ""; + backupConfigStringValues["sync_business_shared_items"] = ""; + + // bool flags to trigger if the entries that trigger a --resync were found in the backup config file + // if these were not in the backup file, they may have been added ... 
thus new, thus we need to double check the existing + // config file to see if this was a newly added config option + bool drive_id_present = false; + bool sync_dir_present = false; + bool skip_file_present = false; + bool skip_dir_present = false; + bool skip_dotfiles_present = false; + bool skip_symlinks_present = false; + bool sync_business_shared_items_present = false; + + // Common debug message if an element is different + string configOptionModifiedMessage = " was modified since the last time the application was successfully run, --resync required"; + + auto configBackupFileHandle = File(configBackupFile, "r"); + string lineBuffer; + + // read configBackupFile line by line + auto range = configBackupFileHandle.byLine(); + // for each line + foreach (line; range) { + lineBuffer = stripLeft(line).to!string; + log.vdebug("Backup Config Line: ", lineBuffer); + if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; + auto c = lineBuffer.matchFirst(configRegex); + if (!c.empty) { + c.popFront(); // skip the whole match + string key = c.front.dup; + log.vdebug("Backup Config Key: ", key); + auto p = key in backupConfigStringValues; + if (p) { + c.popFront(); + // compare this key + if (key == "drive_id") { + drive_id_present = true; + if (c.front.dup != getValueString("drive_id")) { + log.vdebug(key, configOptionModifiedMessage); + configFileOptionsDifferent = true; + } + } + + if (key == "sync_dir") { + sync_dir_present = true; + if (c.front.dup != getValueString("sync_dir")) { + log.vdebug(key, configOptionModifiedMessage); + configFileOptionsDifferent = true; + } + } + + if (key == "skip_file") { + skip_file_present = true; + string computedBackupSkipFile = defaultSkipFile ~ "|" ~ to!string(c.front.dup); + if (computedBackupSkipFile != getValueString("skip_file")) { + log.vdebug(key, configOptionModifiedMessage); + configFileOptionsDifferent = true; + } + } + + if (key == "skip_dir") { + skip_dir_present = true; + if (c.front.dup 
!= getValueString("skip_dir")) { + log.vdebug(key, configOptionModifiedMessage); + configFileOptionsDifferent = true; + } + } + + if (key == "skip_dotfiles") { + skip_dotfiles_present = true; + if (c.front.dup != to!string(getValueBool("skip_dotfiles"))) { + log.vdebug(key, configOptionModifiedMessage); + configFileOptionsDifferent = true; + } + } + + if (key == "skip_symlinks") { + skip_symlinks_present = true; + if (c.front.dup != to!string(getValueBool("skip_symlinks"))) { + log.vdebug(key, configOptionModifiedMessage); + configFileOptionsDifferent = true; + } + } + + if (key == "sync_business_shared_items") { + sync_business_shared_items_present = true; + if (c.front.dup != to!string(getValueBool("sync_business_shared_items"))) { + log.vdebug(key, configOptionModifiedMessage); + configFileOptionsDifferent = true; + } + } + } + } + } + + // close file if open + if (configBackupFileHandle.isOpen()) { + // close open file + configBackupFileHandle.close(); + } + + // Were any of the items that trigger a --resync not in the existing backup 'config' file .. thus newly added? + if ((!drive_id_present) || (!sync_dir_present) || (! skip_file_present) || (!skip_dir_present) || (!skip_dotfiles_present) || (!skip_symlinks_present)) { + log.vdebug("drive_id present in config backup: ", drive_id_present); + log.vdebug("sync_dir present in config backup: ", sync_dir_present); + log.vdebug("skip_file present in config backup: ", skip_file_present); + log.vdebug("skip_dir present in config backup: ", skip_dir_present); + log.vdebug("skip_dotfiles present in config backup: ", skip_dotfiles_present); + log.vdebug("skip_symlinks present in config backup: ", skip_symlinks_present); + log.vdebug("sync_business_shared_items present in config backup: ", sync_business_shared_items_present); + + if ((!drive_id_present) && (configFileDriveId != "")) { + writeln("drive_id newly added ... 
--resync needed"); + configFileOptionsDifferent = true; + driveIdDifferent = true; + } + + if ((!sync_dir_present) && (configFileSyncDir != defaultSyncDir)) { + writeln("sync_dir newly added ... --resync needed"); + configFileOptionsDifferent = true; + syncDirDifferent = true; + } + + if ((!skip_file_present) && (configFileSkipFile != defaultSkipFile)) { + writeln("skip_file newly added ... --resync needed"); + configFileOptionsDifferent = true; + skipFileDifferent = true; + } + + if ((!skip_dir_present) && (configFileSkipDir != "")) { + writeln("skip_dir newly added ... --resync needed"); + configFileOptionsDifferent = true; + skipFileDifferent = true; + } + + if ((!skip_dotfiles_present) && (configFileSkipDotfiles)) { + writeln("skip_dotfiles newly added ... --resync needed"); + configFileOptionsDifferent = true; + skipDotFilesDifferent = true; + } + + if ((!skip_symlinks_present) && (configFileSkipSymbolicLinks)) { + writeln("skip_symlinks newly added ... --resync needed"); + configFileOptionsDifferent = true; + skipSymbolicLinksDifferent = true; + } + + if ((!sync_business_shared_items_present) && (configFileSyncBusinessSharedItems)) { + writeln("sync_business_shared_items newly added ... --resync needed"); + configFileOptionsDifferent = true; + syncBusinessSharedItemsDifferent = true; + } + } + } else { + // no backup to check + log.log("WARNING: no backup config file was found, unable to validate if any changes made"); + } + } + + // config file set options can be changed via CLI input, specifically these will impact sync and a --resync will be needed: + // --syncdir ARG + // --skip-file ARG + // --skip-dir ARG + // --skip-dot-files + // --skip-symlinks + + if (exists(applicableConfigFilePath)) { + // config file exists + // was the sync_dir updated by CLI? 
+ if (configFileSyncDir != "") { + // sync_dir was set in config file + if (configFileSyncDir != getValueString("sync_dir")) { + // config file was set and CLI input changed this + log.vdebug("sync_dir: CLI override of config file option, --resync needed"); + syncDirDifferent = true; + } + } + + // was the skip_file updated by CLI? + if (configFileSkipFile != "") { + // skip_file was set in config file + if (configFileSkipFile != getValueString("skip_file")) { + // config file was set and CLI input changed this + log.vdebug("skip_file: CLI override of config file option, --resync needed"); + skipFileDifferent = true; + } + } - int returnRequiredFilePermisions() { - // read the configuredFilePermissionMode and return - if (configuredFilePermissionMode == 0) { - // the configured value is zero + // was the skip_dir updated by CLI? + if (configFileSkipDir != "") { + // skip_dir was set in config file + if (configFileSkipDir != getValueString("skip_dir")) { + // config file was set and CLI input changed this + log.vdebug("skip_dir: CLI override of config file option, --resync needed"); + skipDirDifferent = true; + } + } + + // was skip_dotfiles updated by --skip-dot-files ? + if (!configFileSkipDotfiles) { + // was not set in config file + if (getValueBool("skip_dotfiles")) { + // --skip-dot-files passed in + log.vdebug("skip_dotfiles: CLI override of config file option, --resync needed"); + skipDotFilesDifferent = true; + } + } + + // was skip_symlinks updated by --skip-symlinks ? + if (!configFileSkipSymbolicLinks) { + // was not set in config file + if (getValueBool("skip_symlinks")) { + // --skip-symlinks passed in + log.vdebug("skip_symlinks: CLI override of config file option, --resync needed"); + skipSymbolicLinksDifferent = true; + } + } + } + + // Did any of the config files or CLI options trigger a --resync requirement? 
+ log.vdebug("configFileOptionsDifferent: ", configFileOptionsDifferent); + // Options + log.vdebug("driveIdDifferent: ", driveIdDifferent); + log.vdebug("syncDirDifferent: ", syncDirDifferent); + log.vdebug("skipFileDifferent: ", skipFileDifferent); + log.vdebug("skipDirDifferent: ", skipDirDifferent); + log.vdebug("skipDotFilesDifferent: ", skipDotFilesDifferent); + log.vdebug("skipSymbolicLinksDifferent: ", skipSymbolicLinksDifferent); + log.vdebug("syncBusinessSharedItemsDifferent: ", syncBusinessSharedItemsDifferent); + // Files + log.vdebug("syncListFileDifferent: ", syncListFileDifferent); + log.vdebug("businessSharedItemsFileDifferent: ", businessSharedItemsFileDifferent); + + if ((configFileOptionsDifferent) || (syncListFileDifferent) || (businessSharedItemsFileDifferent) || (syncDirDifferent) || (skipFileDifferent) || (skipDirDifferent) || (driveIdDifferent) || (skipDotFilesDifferent) || (skipSymbolicLinksDifferent) || (syncBusinessSharedItemsDifferent) ) { + // set the flag + resyncRequired = true; + } + return resyncRequired; + } + + // Cleanup hash files that require to be cleaned up when a --resync is issued + void cleanupHashFilesDueToResync() { + if (!getValueBool("dry_run")) { + // cleanup hash files + log.vdebug("Cleaning up configuration hash files"); + safeRemove(configHashFile); + safeRemove(syncListHashFile); + safeRemove(businessSharedItemsHashFile); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... + log.log("DRY RUN: Not removing hash files as --dry-run has been used"); + } + } + + // For each of the config files, update the hash data in the hash files + void updateHashContentsForConfigFiles() { + // Are we in a --dry-run scenario? 
+ if (!getValueBool("dry_run")) { + // Not a dry-run scenario, update the applicable files + // Update applicable 'config' files + if (exists(applicableConfigFilePath)) { + // Update the hash of the applicable config file + log.vdebug("Updating applicable config file hash"); + std.file.write(configHashFile, computeQuickXorHash(applicableConfigFilePath)); + // Hash file should only be readable by the user who created it - 0600 permissions needed + configHashFile.setAttributes(convertedPermissionValue); + } + // Update 'sync_list' files + if (exists(syncListFilePath)) { + // update sync_list hash + log.vdebug("Updating sync_list hash"); + std.file.write(syncListHashFile, computeQuickXorHash(syncListFilePath)); + // Hash file should only be readable by the user who created it - 0600 permissions needed + syncListHashFile.setAttributes(convertedPermissionValue); + } + + + // Update 'update business_shared_items' files + if (exists(businessSharedItemsFilePath)) { + // update business_shared_folders hash + log.vdebug("Updating business_shared_items hash"); + std.file.write(businessSharedItemsHashFile, computeQuickXorHash(businessSharedItemsFilePath)); + // Hash file should only be readable by the user who created it - 0600 permissions needed + businessSharedItemsHashFile.setAttributes(convertedPermissionValue); + } + + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... 
+ log.log("DRY RUN: Not updating hash files as --dry-run has been used"); + } + } + + // Create any required hash files for files that help us determine if the configuration has changed since last run + void createRequiredInitialConfigurationHashFiles() { + // Does a 'config' file exist with a valid hash file + if (exists(applicableConfigFilePath)) { + if (!exists(configHashFile)) { + // no existing hash file exists + std.file.write(configHashFile, "initial-hash"); + // Hash file should only be readable by the user who created it - 0600 permissions needed + configHashFile.setAttributes(convertedPermissionValue); + } + // Generate the runtime hash for the 'config' file + currentConfigHash = computeQuickXorHash(applicableConfigFilePath); + } + + // Does a 'sync_list' file exist with a valid hash file + if (exists(syncListFilePath)) { + if (!exists(syncListHashFile)) { + // no existing hash file exists + std.file.write(syncListHashFile, "initial-hash"); + // Hash file should only be readable by the user who created it - 0600 permissions needed + syncListHashFile.setAttributes(convertedPermissionValue); + } + // Generate the runtime hash for the 'sync_list' file + currentSyncListHash = computeQuickXorHash(syncListFilePath); + } + + // Does a 'business_shared_items' file exist with a valid hash file + if (exists(businessSharedItemsFilePath)) { + if (!exists(businessSharedItemsHashFile)) { + // no existing hash file exists + std.file.write(businessSharedItemsHashFile, "initial-hash"); + // Hash file should only be readable by the user who created it - 0600 permissions needed + businessSharedItemsHashFile.setAttributes(convertedPermissionValue); + } + // Generate the runtime hash for the 'sync_list' file + currentBusinessSharedItemsHash = computeQuickXorHash(businessSharedItemsFilePath); + } + } + + // Read in the text values of the previous configurations + int readExistingConfigurationHashFiles() { + if (exists(configHashFile)) { + try { + previousConfigHash = 
readText(configHashFile); + } catch (std.file.FileException e) { + // Unable to access required file + log.error("ERROR: Unable to access ", e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } + + if (exists(syncListHashFile)) { + try { + previousSyncListHash = readText(syncListHashFile); + } catch (std.file.FileException e) { + // Unable to access required file + log.error("ERROR: Unable to access ", e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } + if (exists(businessSharedItemsHashFile)) { + try { + previousBusinessSharedItemsHash = readText(businessSharedItemsHashFile); + } catch (std.file.FileException e) { + // Unable to access required file + log.error("ERROR: Unable to access ", e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } + return 0; + } + + // Check for basic option conflicts - flags that should not be used together and/or flag combinations that conflict with each other + bool checkForBasicOptionConflicts() { + + bool operationalConflictDetected = false; + + // What are the permission that have been set for the application? + // These are relevant for: + // - The ~/OneDrive parent folder or 'sync_dir' configured item + // - Any new folder created under ~/OneDrive or 'sync_dir' + // - Any new file created under ~/OneDrive or 'sync_dir' + // valid permissions are 000 -> 777 - anything else is invalid + if ((getValueLong("sync_dir_permissions") < 0) || (getValueLong("sync_file_permissions") < 0) || (getValueLong("sync_dir_permissions") > 777) || (getValueLong("sync_file_permissions") > 777)) { + log.error("ERROR: Invalid 'User|Group|Other' permissions set within config file. 
Please check your configuration."); + operationalConflictDetected = true; + } else { + // Debug log output what permissions are being set to + log.vdebug("Configuring default new folder permissions as: ", getValueLong("sync_dir_permissions")); + configureRequiredDirectoryPermisions(); + log.vdebug("Configuring default new file permissions as: ", getValueLong("sync_file_permissions")); configureRequiredFilePermisions(); } - return configuredFilePermissionMode; + + // --upload-only and --download-only cannot be used together + if ((getValueBool("upload_only")) && (getValueBool("download_only"))) { + log.error("ERROR: --upload-only and --download-only cannot be used together. Use one, not both at the same time."); + operationalConflictDetected = true; + } + + // --sync and --monitor cannot be used together + if ((getValueBool("synchronize")) && (getValueBool("monitor"))) { + log.error("ERROR: --sync and --monitor cannot be used together. Use one, not both at the same time."); + operationalConflictDetected = true; + } + + // --no-remote-delete can ONLY be enabled when --upload-only is used + if ((getValueBool("no_remote_delete")) && (!getValueBool("upload_only"))) { + log.error("ERROR: --no-remote-delete can only be used with --upload-only."); + operationalConflictDetected = true; + } + + // --remove-source-files can ONLY be enabled when --upload-only is used + if ((getValueBool("remove_source_files")) && (!getValueBool("upload_only"))) { + log.error("ERROR: --remove-source-files can only be used with --upload-only."); + operationalConflictDetected = true; + } + + // --cleanup-local-files can ONLY be enabled when --download-only is used + if ((getValueBool("cleanup_local_files")) && (!getValueBool("download_only"))) { + log.error("ERROR: --cleanup-local-files can only be used with --download-only."); + operationalConflictDetected = true; + } + + // --list-shared-folders cannot be used with --resync and/or --resync-auth + if ((getValueBool("list_business_shared_items")) 
&& ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + log.error("ERROR: --list-shared-folders cannot be used with --resync or --resync-auth."); + operationalConflictDetected = true; + } + + // --display-sync-status cannot be used with --resync and/or --resync-auth + if ((getValueBool("display_sync_status")) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + log.error("ERROR: --display-sync-status cannot be used with --resync or --resync-auth."); + operationalConflictDetected = true; + } + + // --modified-by cannot be used with --resync and/or --resync-auth + if ((!getValueString("modified_by").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + log.error("ERROR: --modified-by cannot be used with --resync or --resync-auth."); + operationalConflictDetected = true; + } + + // --get-file-link cannot be used with --resync and/or --resync-auth + if ((!getValueString("get_file_link").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + log.error("ERROR: --get-file-link cannot be used with --resync or --resync-auth."); + operationalConflictDetected = true; + } + + // --create-share-link cannot be used with --resync and/or --resync-auth + if ((!getValueString("create_share_link").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + log.error("ERROR: --create-share-link cannot be used with --resync or --resync-auth."); + operationalConflictDetected = true; + } + + // --get-sharepoint-drive-id cannot be used with --resync and/or --resync-auth + if ((!getValueString("sharepoint_library_name").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + log.error("ERROR: --get-sharepoint-drive-id cannot be used with --resync or --resync-auth."); + operationalConflictDetected = true; + } + + // --monitor and --display-sync-status cannot be used together + if ((getValueBool("monitor")) && (getValueBool("display_sync_status"))) { + log.error("ERROR: --monitor and 
--display-sync-status cannot be used together."); + operationalConflictDetected = true; + } + + // --sync and --display-sync-status cannot be used together + if ((getValueBool("synchronize")) && (getValueBool("display_sync_status"))) { + log.error("ERROR: --sync and --display-sync-status cannot be used together."); + operationalConflictDetected = true; + } + + // --force-sync can only be used when using --sync --single-directory + if (getValueBool("force_sync")) { + + bool conflict = false; + // Should not be used with --monitor + if (getValueBool("monitor")) conflict = true; + // single_directory must not be empty + if (getValueString("single_directory").empty) conflict = true; + if (conflict) { + log.error("ERROR: --force-sync can only be used with --sync --single-directory."); + operationalConflictDetected = true; + } + } + + // When using 'azure_ad_endpoint', 'azure_tenant_id' cannot be empty + if ((!getValueString("azure_ad_endpoint").empty) && (getValueString("azure_tenant_id").empty)) { + log.error("ERROR: config option 'azure_tenant_id' cannot be empty when 'azure_ad_endpoint' is configured."); + operationalConflictDetected = true; + } + + // When using --enable-logging the 'log_dir' cannot be empty + if ((getValueBool("enable_logging")) && (getValueString("log_dir").empty)) { + log.error("ERROR: config option 'log_dir' cannot be empty when 'enable_logging' is configured."); + operationalConflictDetected = true; + } + + // When using --syncdir, the value cannot be empty. 
+ if (strip(getValueString("sync_dir")).empty) { + log.error("ERROR: --syncdir value cannot be empty."); + operationalConflictDetected = true; + } + + // --monitor and --create-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("create_directory").empty)) { + log.error("ERROR: --monitor and --create-directory cannot be used together."); + operationalConflictDetected = true; + } + + // --sync and --create-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("create_directory").empty)) { + log.error("ERROR: --sync and --create-directory cannot be used together."); + operationalConflictDetected = true; + } + + // --monitor and --remove-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("remove_directory").empty)) { + log.error("ERROR: --monitor and --remove-directory cannot be used together."); + operationalConflictDetected = true; + } + + // --sync and --remove-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("remove_directory").empty)) { + log.error("ERROR: --sync and --remove-directory cannot be used together."); + operationalConflictDetected = true; + } + + // --monitor and --source-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("source_directory").empty)) { + log.error("ERROR: --monitor and --source-directory cannot be used together."); + operationalConflictDetected = true; + } + + // --sync and --source-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("source_directory").empty)) { + log.error("ERROR: --sync and --source-directory cannot be used together."); + operationalConflictDetected = true; + } + + // --monitor and --destination-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("destination_directory").empty)) { + log.error("ERROR: --monitor and --destination-directory cannot be used together."); + 
operationalConflictDetected = true; + } + + // --sync and --destination-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("destination_directory").empty)) { + log.error("ERROR: --sync and --destination-directory cannot be used together."); + operationalConflictDetected = true; + } + + // Return bool value indicating if we have an operational conflict + return operationalConflictDetected; } + // Reset skip_file and skip_dir to application defaults when --force-sync is used void resetSkipToDefaults() { - // reset skip_file and skip_dir to application defaults // skip_file log.vdebug("original skip_file: ", getValueString("skip_file")); - log.vdebug("resetting skip_file"); + log.vdebug("resetting skip_file to application defaults"); setValueString("skip_file", defaultSkipFile); log.vdebug("reset skip_file: ", getValueString("skip_file")); // skip_dir log.vdebug("original skip_dir: ", getValueString("skip_dir")); - log.vdebug("resetting skip_dir"); + log.vdebug("resetting skip_dir to application defaults"); setValueString("skip_dir", defaultSkipDir); log.vdebug("reset skip_dir: ", getValueString("skip_dir")); } + + // Initialise the correct 'sync_dir' expanding any '~' if present + string initialiseRuntimeSyncDirectory() { + + string runtimeSyncDirectory; + + log.vdebug("sync_dir: Setting runtimeSyncDirectory from config value 'sync_dir'"); + + if (!shellEnvironmentSet){ + log.vdebug("sync_dir: No SHELL or USER environment variable configuration detected"); + // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker + // Does the 'currently configured' sync_dir include a ~ + if (canFind(getValueString("sync_dir"), "~")) { + // A ~ was found in sync_dir + log.vdebug("sync_dir: A '~' was found in 'sync_dir', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); + runtimeSyncDirectory = 
buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("sync_dir"), "~"))); + } else { + // No ~ found in sync_dir, use as is + log.vdebug("sync_dir: Using configured 'sync_dir' path as-is as no SHELL or USER environment variable configuration detected"); + runtimeSyncDirectory = getValueString("sync_dir"); + } + } else { + // A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present + if (canFind(getValueString("sync_dir"), "~")) { + log.vdebug("sync_dir: A '~' was found in the configured 'sync_dir', automatically expanding as SHELL and USER environment variable is set"); + runtimeSyncDirectory = expandTilde(getValueString("sync_dir")); + } else { + // No ~ found in sync_dir, does the path begin with a '/' ? + log.vdebug("sync_dir: Using configured 'sync_dir' path as-is as however SHELL or USER environment variable configuration detected - should be placed in USER home directory"); + if (!startsWith(getValueString("sync_dir"), "/")) { + log.vdebug("Configured 'sync_dir' does not start with a '/' or '~/' - adjusting configured 'sync_dir' to use User Home Directory as base for 'sync_dir' path"); + string updatedPathWithHome = "~/" ~ getValueString("sync_dir"); + runtimeSyncDirectory = expandTilde(updatedPathWithHome); + } else { + log.vdebug("use 'sync_dir' as is - no touch"); + runtimeSyncDirectory = getValueString("sync_dir"); + } + } + } + + // What will runtimeSyncDirectory be actually set to? 
+ log.vdebug("runtimeSyncDirectory set to: ", runtimeSyncDirectory); + return runtimeSyncDirectory; + } + + // Initialise the correct 'log_dir' when application logging to a separate file is enabled with 'enable_logging' and expanding any '~' if present + string initialiseLogDirectory() { + + string initialisedLogDirPath; + + log.vdebug("log_dir: Setting runtime application log from config value 'log_dir'"); + + if (getValueString("log_dir") != defaultLogFileDir) { + // User modified 'log_dir' to be used with 'enable_logging' + // if 'log_dir' contains a '~' this needs to be expanded correctly + if (canFind(getValueString("log_dir"), "~")) { + // ~ needs to be expanded correctly + if (!shellEnvironmentSet) { + // No shell or user environment variable set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker + log.vdebug("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); + initialisedLogDirPath = buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("log_dir"), "~"))); + } else { + // A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present + log.vdebug("log_dir: A '~' was found in the configured 'log_dir', automatically expanding as SHELL and USER environment variable is set"); + initialisedLogDirPath = expandTilde(getValueString("log_dir")); + } + } else { + // '~' not found in log_dir entry, use as is + initialisedLogDirPath = getValueString("log_dir"); + } + } else { + // Default 'log_dir' to be used with 'enable_logging' + initialisedLogDirPath = defaultLogFileDir; + } + + // Return the initialised application log path + return initialisedLogDirPath; + } } -void outputLongHelp(Option[] opt) -{ - auto argsNeedingOptions = [ - "--auth-files", - "--auth-response", - "--confdir", - "--create-directory", - "--create-share-link", - "--destination-directory", - 
"--get-file-link", - "--get-O365-drive-id", - "--log-dir", - "--min-notify-changes", - "--modified-by", - "--monitor-interval", - "--monitor-log-frequency", - "--monitor-fullscan-frequency", - "--operation-timeout", - "--remove-directory", - "--single-directory", - "--skip-dir", - "--skip-file", - "--skip-size", - "--source-directory", - "--space-reservation", - "--syncdir", - "--user-agent" ]; - writeln(`OneDrive - a client for OneDrive Cloud Services +// Output the full application help when --help is passed in +void outputLongHelp(Option[] opt) { + auto argsNeedingOptions = [ + "--auth-files", + "--auth-response", + "--confdir", + "--create-directory", + "--classify-as-big-delete", + "--create-share-link", + "--destination-directory", + "--get-file-link", + "--get-O365-drive-id", + "--log-dir", + "--min-notify-changes", + "--modified-by", + "--monitor-interval", + "--monitor-log-frequency", + "--monitor-fullscan-frequency", + "--operation-timeout", + "--remove-directory", + "--single-directory", + "--skip-dir", + "--skip-file", + "--skip-size", + "--source-directory", + "--space-reservation", + "--syncdir", + "--user-agent" ]; + writeln(`OneDrive - a client for OneDrive Cloud Services -Usage: - onedrive [options] --synchronize - Do a one time synchronization - onedrive [options] --monitor - Monitor filesystem and sync regularly - onedrive [options] --display-config - Display the currently used configuration - onedrive [options] --display-sync-status - Query OneDrive service and report on pending changes - onedrive -h | --help - Show this help screen - onedrive --version - Show version + Usage: + onedrive [options] --sync + Do a one time synchronization + onedrive [options] --monitor + Monitor filesystem and sync regularly + onedrive [options] --display-config + Display the currently used configuration + onedrive [options] --display-sync-status + Query OneDrive service and report on pending changes + onedrive -h | --help + Show this help screen + onedrive 
--version + Show version -Options: -`); - foreach (it; opt.sort!("a.optLong < b.optLong")) { - writefln(" %s%s%s%s\n %s", - it.optLong, - it.optShort == "" ? "" : " " ~ it.optShort, - argsNeedingOptions.canFind(it.optLong) ? " ARG" : "", - it.required ? " (required)" : "", it.help); - } -} - -unittest -{ - auto cfg = new Config(""); - cfg.load("config"); - assert(cfg.getValueString("sync_dir") == "~/OneDrive"); -} + Options: + `); + foreach (it; opt.sort!("a.optLong < b.optLong")) { + writefln(" %s%s%s%s\n %s", + it.optLong, + it.optShort == "" ? "" : " " ~ it.optShort, + argsNeedingOptions.canFind(it.optLong) ? " ARG" : "", + it.required ? " (required)" : "", it.help); + } +} \ No newline at end of file diff --git a/src/curlEngine.d b/src/curlEngine.d new file mode 100644 index 000000000..c56d1efc8 --- /dev/null +++ b/src/curlEngine.d @@ -0,0 +1,111 @@ +// What is this module called? +module curlEngine; + +// What does this module require to function? +import std.net.curl; +import etc.c.curl: CurlOption; +import std.datetime; + +// What other modules that we have created do we need to import? 
+import log; + +import std.stdio; + +class CurlEngine { + HTTP http; + + this() { + http = HTTP(); + } + + void initialise(long dnsTimeout, long connectTimeout, long dataTimeout, long operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, long userRateLimit, long protocolVersion) { + // Curl Timeout Handling + + // libcurl dns_cache_timeout timeout + // https://curl.se/libcurl/c/CURLOPT_DNS_CACHE_TIMEOUT.html + // https://dlang.org/library/std/net/curl/http.dns_timeout.html + http.dnsTimeout = (dur!"seconds"(dnsTimeout)); + + // Timeout for HTTPS connections + // https://curl.se/libcurl/c/CURLOPT_CONNECTTIMEOUT.html + // https://dlang.org/library/std/net/curl/http.connect_timeout.html + http.connectTimeout = (dur!"seconds"(connectTimeout)); + + // Timeout for activity on connection + // This is a DMD | DLANG specific item, not a libcurl item + // https://dlang.org/library/std/net/curl/http.data_timeout.html + // https://raw.githubusercontent.com/dlang/phobos/master/std/net/curl.d - private enum _defaultDataTimeout = dur!"minutes"(2); + http.dataTimeout = (dur!"seconds"(dataTimeout)); + + // Maximum time any operation is allowed to take + // This includes dns resolution, connecting, data transfer, etc. + // https://curl.se/libcurl/c/CURLOPT_TIMEOUT_MS.html + // https://dlang.org/library/std/net/curl/http.operation_timeout.html + http.operationTimeout = (dur!"seconds"(operationTimeout)); + + // Specify how many redirects should be allowed + http.maxRedirects(maxRedirects); + // Debug HTTPS + http.verbose = httpsDebug; + // Use the configured 'user_agent' value + http.setUserAgent = userAgent; + // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 + http.handle.set(CurlOption.ipresolve,protocolVersion); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + + // What version of HTTP protocol do we use? 
+ // Curl >= 7.62.0 defaults to http2 for a significant number of operations + if (httpProtocol) { + // Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1 + http.handle.set(CurlOption.http_version,2); + } + + // Configure upload / download rate limits if configured + // 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts + // A 0 value means rate is unlimited, and is the curl default + if (userRateLimit > 0) { + // set rate limit + http.handle.set(CurlOption.max_send_speed_large,userRateLimit); + http.handle.set(CurlOption.max_recv_speed_large,userRateLimit); + } + + // Explicitly set these libcurl options + // https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html + // Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals + http.handle.set(CurlOption.nosignal,0); + + // https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html + // Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled + http.handle.set(CurlOption.tcp_nodelay,0); + + // https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html + // CURLOPT_FORBID_REUSE - make connection get closed at once after use + // Ensure that we ARE NOT reusing TCP sockets connections - setting to 0 ensures that we ARE reusing connections (we did this in v2.4.xx) to ensure connections remained open and usable + // Setting this to 1 ensures that when we close the curl instance, any open sockets are closed - which we need to do when running + // multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly + // The libcurl default is 1 - ensure we are configuring not to reuse connections and leave unused sockets open + http.handle.set(CurlOption.forbid_reuse,1); + + if (httpsDebug) { + // Output what options we are using so that in the debug log this can be tracked + log.vdebug("http.dnsTimeout = ", dnsTimeout); + log.vdebug("http.connectTimeout = ", connectTimeout); + 
log.vdebug("http.dataTimeout = ", dataTimeout); + log.vdebug("http.operationTimeout = ", operationTimeout); + log.vdebug("http.maxRedirects = ", maxRedirects); + log.vdebug("http.CurlOption.ipresolve = ", protocolVersion); + } + } + + void setMethodPost() { + http.method = HTTP.Method.post; + } + + void setMethodPatch() { + http.method = HTTP.Method.patch; + } + + void setDisableSSLVerifyPeer() { + log.vdebug("Switching off CurlOption.ssl_verifypeer"); + http.handle.set(CurlOption.ssl_verifypeer, 0); + } +} \ No newline at end of file diff --git a/src/itemdb.d b/src/itemdb.d index 28fc47121..f1663a0d8 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -1,3 +1,7 @@ +// What is this module called? +module itemdb; + +// What does this module require to function? import std.datetime; import std.exception; import std.path; @@ -5,19 +9,26 @@ import std.string; import std.stdio; import std.algorithm.searching; import core.stdc.stdlib; +import std.json; +import std.conv; + +// What other modules that we have created do we need to import? import sqlite; -static import log; +import util; +import log; enum ItemType { file, dir, - remote + remote, + unknown } struct Item { string driveId; string id; string name; + string remoteName; ItemType type; string eTag; string cTag; @@ -28,23 +39,144 @@ struct Item { string remoteDriveId; string remoteId; string syncStatus; + string size; +} + +// Construct an Item struct from a JSON driveItem +Item makeDatabaseItem(JSONValue driveItem) { + + Item item = { + id: driveItem["id"].str, + name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Business + eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Business + cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Business) + remoteName: "actualOnlineName" in driveItem ? 
driveItem["actualOnlineName"].str : null, // actualOnlineName is only used with OneDrive Business Shared Folders + }; + + // OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834 + // OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive + if(isItemDeleted(driveItem)) { + // Set mtime to SysTime(0) + item.mtime = SysTime(0); + } else { + // Item is not in a deleted state + // Resolve 'Key not found: fileSystemInfo' when then item is a remote item + // https://github.com/abraunegg/onedrive/issues/11 + if (isItemRemote(driveItem)) { + // remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default + // Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI + // to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to a application crash + // See: https://github.com/abraunegg/onedrive/issues/1533 + if ("fileSystemInfo" in driveItem["remoteItem"]) { + // 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases + item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str); + } else { + // is a remote item, but 'fileSystemInfo' is missing from 'remoteItem' + if ("fileSystemInfo" in driveItem) { + item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + } + } + } else { + // Does fileSystemInfo exist at all ? 
+ if ("fileSystemInfo" in driveItem) { + item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + } + } + } + + // Set this item object type + bool typeSet = false; + if (isItemFile(driveItem)) { + // 'file' object exists in the JSON + log.vdebug("Flagging object as a file"); + typeSet = true; + item.type = ItemType.file; + } + + if (isItemFolder(driveItem)) { + // 'folder' object exists in the JSON + log.vdebug("Flagging object as a directory"); + typeSet = true; + item.type = ItemType.dir; + } + + if (isItemRemote(driveItem)) { + // 'remote' object exists in the JSON + log.vdebug("Flagging object as a remote"); + typeSet = true; + item.type = ItemType.remote; + } + + // root and remote items do not have parentReference + if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) { + item.driveId = driveItem["parentReference"]["driveId"].str; + if (hasParentReferenceId(driveItem)) { + item.parentId = driveItem["parentReference"]["id"].str; + } + } + + // extract the file hash and file size + if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) { + // Get file size + if (hasFileSize(driveItem)) { + item.size = to!string(driveItem["size"].integer); + // Get quickXorHash as default + if ("quickXorHash" in driveItem["file"]["hashes"]) { + item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str; + } else { + log.vdebug("quickXorHash is missing from ", driveItem["id"].str); + } + + // If quickXorHash is empty .. + if (item.quickXorHash.empty) { + // Is there a sha256Hash? + if ("sha256Hash" in driveItem["file"]["hashes"]) { + item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str; + } else { + log.vdebug("sha256Hash is missing from ", driveItem["id"].str); + } + } + } else { + // So that we have at least a zero value here as the API provided no 'size' data for this file item + item.size = "0"; + } + } + + // Is the object a remote drive item - living on another driveId ? 
+ if (isItemRemote(driveItem)) { + item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; + item.remoteId = driveItem["remoteItem"]["id"].str; + } + + // We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not: + // - National Cloud Deployments do not support /delta as a query + // - When using --single-directory + // - When using --download-only --cleanup-local-files + // + // Thus we need to track in the database that this item is in sync + // As we are making an item, set the syncStatus to Y + // ONLY when either of the three modes above are being used, all the existing DB entries will get set to N + // so when processing /children, it can be identified what the 'deleted' difference is + item.syncStatus = "Y"; + + // Return the created item + return item; } -final class ItemDatabase -{ +final class ItemDatabase { // increment this for every change in the db schema - immutable int itemDatabaseVersion = 11; + immutable int itemDatabaseVersion = 12; Database db; string insertItemStmt; string updateItemStmt; string selectItemByIdStmt; + string selectItemByRemoteIdStmt; string selectItemByParentIdStmt; string deleteItemByIdStmt; bool databaseInitialised = false; - this(const(char)[] filename) - { + this(const(char)[] filename) { db = Database(filename); int dbVersion; try { @@ -99,12 +231,12 @@ final class ItemDatabase db.exec("PRAGMA locking_mode = EXCLUSIVE"); insertItemStmt = " - INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13) + INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus, size) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15) "; updateItemStmt = " UPDATE item - SET name = ?3, type = ?4, eTag 
= ?5, cTag = ?6, mtime = ?7, parentId = ?8, quickXorHash = ?9, sha256Hash = ?10, remoteDriveId = ?11, remoteId = ?12, syncStatus = ?13 + SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteId = ?13, syncStatus = ?14, size = ?15 WHERE driveId = ?1 AND id = ?2 "; selectItemByIdStmt = " @@ -112,6 +244,11 @@ final class ItemDatabase FROM item WHERE driveId = ?1 AND id = ?2 "; + selectItemByRemoteIdStmt = " + SELECT * + FROM item + WHERE remoteDriveId = ?1 AND remoteId = ?2 + "; selectItemByParentIdStmt = "SELECT * FROM item WHERE driveId = ? AND parentId = ?"; deleteItemByIdStmt = "DELETE FROM item WHERE driveId = ? AND id = ?"; @@ -119,17 +256,16 @@ final class ItemDatabase databaseInitialised = true; } - bool isDatabaseInitialised() - { + bool isDatabaseInitialised() { return databaseInitialised; } - void createTable() - { + void createTable() { db.exec("CREATE TABLE item ( driveId TEXT NOT NULL, id TEXT NOT NULL, name TEXT NOT NULL, + remoteName TEXT, type TEXT NOT NULL, eTag TEXT, cTag TEXT, @@ -141,6 +277,7 @@ final class ItemDatabase remoteId TEXT, deltaLink TEXT, syncStatus TEXT, + size TEXT, PRIMARY KEY (driveId, id), FOREIGN KEY (driveId, parentId) REFERENCES item (driveId, id) @@ -154,32 +291,27 @@ final class ItemDatabase db.setVersion(itemDatabaseVersion); } - void insert(const ref Item item) - { + void insert(const ref Item item) { auto p = db.prepare(insertItemStmt); bindItem(item, p); p.exec(); } - void update(const ref Item item) - { + void update(const ref Item item) { auto p = db.prepare(updateItemStmt); bindItem(item, p); p.exec(); } - void dump_open_statements() - { + void dump_open_statements() { db.dump_open_statements(); } - int db_checkpoint() - { + int db_checkpoint() { return db.db_checkpoint(); } - void upsert(const ref Item item) - { + void upsert(const ref Item item) { auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? 
AND id = ?"); s.bind(1, item.driveId); s.bind(2, item.id); @@ -191,8 +323,7 @@ final class ItemDatabase stmt.exec(); } - Item[] selectChildren(const(char)[] driveId, const(char)[] id) - { + Item[] selectChildren(const(char)[] driveId, const(char)[] id) { auto p = db.prepare(selectItemByParentIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -205,8 +336,7 @@ final class ItemDatabase return items; } - bool selectById(const(char)[] driveId, const(char)[] id, out Item item) - { + bool selectById(const(char)[] driveId, const(char)[] id, out Item item) { auto p = db.prepare(selectItemByIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -218,9 +348,20 @@ final class ItemDatabase return false; } + bool selectByRemoteId(const(char)[] remoteDriveId, const(char)[] remoteId, out Item item) { + auto p = db.prepare(selectItemByRemoteIdStmt); + p.bind(1, remoteDriveId); + p.bind(2, remoteId); + auto r = p.exec(); + if (!r.empty) { + item = buildItem(r); + return true; + } + return false; + } + // returns true if an item id is in the database - bool idInLocalDatabase(const(string) driveId, const(string)id) - { + bool idInLocalDatabase(const(string) driveId, const(string)id) { auto p = db.prepare(selectItemByIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -233,8 +374,7 @@ final class ItemDatabase // returns the item with the given path // the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3" - bool selectByPath(const(char)[] path, string rootDriveId, out Item item) - { + bool selectByPath(const(char)[] path, string rootDriveId, out Item item) { Item currItem = { driveId: rootDriveId }; // Issue https://github.com/abraunegg/onedrive/issues/578 @@ -254,6 +394,7 @@ final class ItemDatabase auto r = s.exec(); if (r.empty) return false; currItem = buildItem(r); + // if the item is of type remote substitute it with the child if (currItem.type == ItemType.remote) { Item child; @@ -268,8 +409,7 @@ final class ItemDatabase } // same as selectByPath() but it does not traverse 
remote folders - bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item) - { + bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item) { Item currItem = { driveId: rootDriveId }; // Issue https://github.com/abraunegg/onedrive/issues/578 @@ -294,58 +434,80 @@ final class ItemDatabase return true; } - void deleteById(const(char)[] driveId, const(char)[] id) - { + void deleteById(const(char)[] driveId, const(char)[] id) { auto p = db.prepare(deleteItemByIdStmt); p.bind(1, driveId); p.bind(2, id); p.exec(); } - private void bindItem(const ref Item item, ref Statement stmt) - { + private void bindItem(const ref Item item, ref Statement stmt) { with (stmt) with (item) { bind(1, driveId); bind(2, id); bind(3, name); + bind(4, remoteName); string typeStr = null; final switch (type) with (ItemType) { case file: typeStr = "file"; break; case dir: typeStr = "dir"; break; case remote: typeStr = "remote"; break; + case unknown: typeStr = "unknown"; break; } - bind(4, typeStr); - bind(5, eTag); - bind(6, cTag); - bind(7, mtime.toISOExtString()); - bind(8, parentId); - bind(9, quickXorHash); - bind(10, sha256Hash); - bind(11, remoteDriveId); - bind(12, remoteId); - bind(13, syncStatus); + bind(5, typeStr); + bind(6, eTag); + bind(7, cTag); + bind(8, mtime.toISOExtString()); + bind(9, parentId); + bind(10, quickXorHash); + bind(11, sha256Hash); + bind(12, remoteDriveId); + bind(13, remoteId); + bind(14, syncStatus); + bind(15, size); } } - private Item buildItem(Statement.Result result) - { + private Item buildItem(Statement.Result result) { assert(!result.empty, "The result must not be empty"); - assert(result.front.length == 14, "The result must have 14 columns"); + assert(result.front.length == 16, "The result must have 16 columns"); Item item = { + + // column 0: driveId + // column 1: id + // column 2: name + // column 3: remoteName - only used when there is a difference in the local name & remote shared folder name 
+ // column 4: type + // column 5: eTag + // column 6: cTag + // column 7: mtime + // column 8: parentId + // column 9: quickXorHash + // column 10: sha256Hash + // column 11: remoteDriveId + // column 12: remoteId + // column 13: deltaLink + // column 14: syncStatus + // column 15: size + driveId: result.front[0].dup, id: result.front[1].dup, name: result.front[2].dup, - eTag: result.front[4].dup, - cTag: result.front[5].dup, - mtime: SysTime.fromISOExtString(result.front[6]), - parentId: result.front[7].dup, - quickXorHash: result.front[8].dup, - sha256Hash: result.front[9].dup, - remoteDriveId: result.front[10].dup, - remoteId: result.front[11].dup, - syncStatus: result.front[12].dup + remoteName: result.front[3].dup, + // Column 4 is type - not set here + eTag: result.front[5].dup, + cTag: result.front[6].dup, + mtime: SysTime.fromISOExtString(result.front[7]), + parentId: result.front[8].dup, + quickXorHash: result.front[9].dup, + sha256Hash: result.front[10].dup, + remoteDriveId: result.front[11].dup, + remoteId: result.front[12].dup, + // Column 13 is deltaLink - not set here + syncStatus: result.front[14].dup, + size: result.front[15].dup }; - switch (result.front[3]) { + switch (result.front[4]) { case "file": item.type = ItemType.file; break; case "dir": item.type = ItemType.dir; break; case "remote": item.type = ItemType.remote; break; @@ -357,8 +519,7 @@ final class ItemDatabase // computes the path of the given item id // the path is relative to the sync directory ex: "Music/Turbo Killer.mp3" // the trailing slash is not added even if the item is a directory - string computePath(const(char)[] driveId, const(char)[] id) - { + string computePath(const(char)[] driveId, const(char)[] id) { assert(driveId && id); string path; Item item; @@ -416,8 +577,7 @@ final class ItemDatabase return path; } - Item[] selectRemoteItems() - { + Item[] selectRemoteItems() { Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL"); auto res 
= stmt.exec(); @@ -428,8 +588,11 @@ final class ItemDatabase return items; } - string getDeltaLink(const(char)[] driveId, const(char)[] id) - { + string getDeltaLink(const(char)[] driveId, const(char)[] id) { + // Log what we received + log.vdebug("DeltaLink Query (driveId): ", driveId); + log.vdebug("DeltaLink Query (id): ", id); + assert(driveId && id); auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2"); stmt.bind(1, driveId); @@ -439,8 +602,7 @@ final class ItemDatabase return res.front[0].dup; } - void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) - { + void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) { assert(driveId && id); assert(deltaLink); auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2"); @@ -455,8 +617,7 @@ final class ItemDatabase // As we query /children to get all children from OneDrive, update anything in the database // to be flagged as not-in-sync, thus, we can use that flag to determing what was previously // in-sync, but now deleted on OneDrive - void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) - { + void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) { assert(driveId); auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2"); stmt.bind(1, driveId); @@ -466,8 +627,7 @@ final class ItemDatabase // National Cloud Deployments (US and DE) do not support /delta as a query // Select items that have a out-of-sync flag set - Item[] selectOutOfSyncItems(const(char)[] driveId) - { + Item[] selectOutOfSyncItems(const(char)[] driveId) { assert(driveId); Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1"); @@ -482,8 +642,7 @@ final class ItemDatabase // OneDrive Business Folders are stored in the database potentially without a root | parentRoot link // Select items associated with the provided driveId - 
Item[] selectByDriveId(const(char)[] driveId) - { + Item[] selectByDriveId(const(char)[] driveId) { assert(driveId); Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL"); @@ -496,9 +655,23 @@ final class ItemDatabase return items; } + // Select all items associated with the provided driveId + Item[] selectAllItemsByDriveId(const(char)[] driveId) { + assert(driveId); + Item[] items; + auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1"); + stmt.bind(1, driveId); + auto res = stmt.exec(); + while (!res.empty) { + items ~= buildItem(res); + res.step(); + } + return items; + } + // Perform a vacuum on the database, commit WAL / SHM to file - void performVacuum() - { + void performVacuum() { + log.vdebug("Attempting to perform a database vacuum to merge any temporary data"); try { auto stmt = db.prepare("VACUUM;"); stmt.exec(); @@ -510,8 +683,7 @@ final class ItemDatabase } // Select distinct driveId items from database - string[] selectDistinctDriveIds() - { + string[] selectDistinctDriveIds() { string[] driveIdArray; auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;"); auto res = stmt.exec(); @@ -522,4 +694,4 @@ final class ItemDatabase } return driveIdArray; } -} +} \ No newline at end of file diff --git a/src/log.d b/src/log.d index b7aa0da68..12afa1403 100644 --- a/src/log.d +++ b/src/log.d @@ -1,31 +1,47 @@ +// What is this module called? +module log; + +// What does this module require to function? 
import std.stdio; import std.file; import std.datetime; import std.process; import std.conv; +import std.path; +import std.string; import core.memory; -import core.sys.posix.pwd, core.sys.posix.unistd, core.stdc.string : strlen; +import core.sys.posix.pwd; +import core.sys.posix.unistd; +import core.stdc.string : strlen; import std.algorithm : splitter; + version(Notifications) { import dnotify; } -// enable verbose logging +// module variables +// verbose logging count long verbose; +// do we write a log file? ... this should be a config falue bool writeLogFile = false; +// did the log file write fail? bool logFileWriteFailFlag = false; - -private bool doNotifications; +private bool triggerNotification; // shared string variable for username string username; string logFilePath; +string logFileName; +string logFileFullPath; -void init(string logDir) -{ +void initialise(string logDir) { writeLogFile = true; + + // Configure various variables username = getUserName(); logFilePath = logDir; + logFileName = username ~ ".onedrive.log"; + logFileFullPath = buildPath(logFilePath, logFileName); if (!exists(logFilePath)){ // logfile path does not exist @@ -34,15 +50,19 @@ void init(string logDir) } catch (std.file.FileException e) { // we got an error .. 
- writeln("\nUnable to access ", logFilePath); - writeln("Please manually create '",logFilePath, "' and set appropriate permissions to allow write access"); - writeln("The requested client activity log will instead be located in your users home directory"); + writeln(); + writeln("ERROR: Unable to access ", logFilePath); + writeln("ERROR: Please manually create '",logFilePath, "' and set appropriate permissions to allow write access"); + writeln("ERROR: The requested client activity log will instead be located in your users home directory"); + writeln(); + + // set the flag so we dont keep printing this sort of message + logFileWriteFailFlag = true; } } } -void setNotifications(bool value) -{ +void enableNotifications(bool value) { version(Notifications) { // if we try to enable notifications, check for server availability // and disable in case dbus server is not reachable @@ -54,11 +74,10 @@ void setNotifications(bool value) } } } - doNotifications = value; + triggerNotification = value; } -void log(T...)(T args) -{ +void log(T...)(T args) { writeln(args); if(writeLogFile){ // Write to log file @@ -66,22 +85,19 @@ void log(T...)(T args) } } -void logAndNotify(T...)(T args) -{ +void logAndNotify(T...)(T args) { notify(args); log(args); } -void fileOnly(T...)(T args) -{ +void fileOnly(T...)(T args) { if(writeLogFile){ // Write to log file logfileWriteLine(args); } } -void vlog(T...)(T args) -{ +void vlog(T...)(T args) { if (verbose >= 1) { writeln(args); if(writeLogFile){ @@ -91,8 +107,7 @@ void vlog(T...)(T args) } } -void vdebug(T...)(T args) -{ +void vdebug(T...)(T args) { if (verbose >= 2) { writeln("[DEBUG] ", args); if(writeLogFile){ @@ -102,8 +117,7 @@ void vdebug(T...)(T args) } } -void vdebugNewLine(T...)(T args) -{ +void vdebugNewLine(T...)(T args) { if (verbose >= 2) { writeln("\n[DEBUG] ", args); if(writeLogFile){ @@ -113,8 +127,7 @@ void vdebugNewLine(T...)(T args) } } -void error(T...)(T args) -{ +void error(T...)(T args) { stderr.writeln(args); 
if(writeLogFile){ // Write to log file @@ -122,16 +135,14 @@ void error(T...)(T args) } } -void errorAndNotify(T...)(T args) -{ +void errorAndNotify(T...)(T args) { notify(args); error(args); } -void notify(T...)(T args) -{ +void notify(T...)(T args) { version(Notifications) { - if (doNotifications) { + if (triggerNotification) { string result; foreach (index, arg; args) { result ~= to!string(arg); @@ -153,45 +164,44 @@ void notify(T...)(T args) } } -private void logfileWriteLine(T...)(T args) -{ +private void logfileWriteLine(T...)(T args) { static import std.exception; // Write to log file - string logFileName = .logFilePath ~ .username ~ ".onedrive.log"; auto currentTime = Clock.currTime(); - auto timeString = currentTime.toString(); + auto timeString = leftJustify(currentTime.toString(), 28, '0'); File logFile; // Resolve: std.exception.ErrnoException@std/stdio.d(423): Cannot open file `/var/log/onedrive/xxxxx.onedrive.log' in mode `a' (Permission denied) try { - logFile = File(logFileName, "a"); + logFile = File(logFileFullPath, "a"); } catch (std.exception.ErrnoException e) { - // We cannot open the log file in logFilePath location for writing + // We cannot open the log file logFileFullPath for writing // The user is not part of the standard 'users' group (GID 100) // Change logfile to ~/onedrive.log putting the log file in the users home directory if (!logFileWriteFailFlag) { // write out error message that we cant log to the requested file - writeln("\nUnable to write activity log to ", logFileName); - writeln("Please set appropriate permissions to allow write access to the logging directory for your user account"); - writeln("The requested client activity log will instead be located in your users home directory\n"); + writeln(); + writeln("ERROR: Unable to write activity log to ", logFileFullPath); + writeln("ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account"); + writeln("ERROR: The requested 
client activity log will instead be located in your users home directory"); + writeln(); // set the flag so we dont keep printing this error message logFileWriteFailFlag = true; } string homePath = environment.get("HOME"); - string logFileNameAlternate = homePath ~ "/onedrive.log"; - logFile = File(logFileNameAlternate, "a"); + string logFileFullPathAlternate = homePath ~ "/onedrive.log"; + logFile = File(logFileFullPathAlternate, "a"); } // Write to the log file logFile.writeln(timeString, "\t", args); logFile.close(); } -private string getUserName() -{ +private string getUserName() { auto pw = getpwuid(getuid); // get required details @@ -216,24 +226,27 @@ private string getUserName() } } -void displayMemoryUsagePreGC() -{ -// Display memory usage -writeln("\nMemory Usage pre GC (bytes)"); -writeln("--------------------"); -writeln("memory usedSize = ", GC.stats.usedSize); -writeln("memory freeSize = ", GC.stats.freeSize); -// uncomment this if required, if not using LDC 1.16 as this does not exist in that version -//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n"); +void displayMemoryUsagePreGC() { + // Display memory usage + writeln(); + writeln("Memory Usage pre GC (KB)"); + writeln("------------------------"); + writeMemoryStats(); + writeln(); } -void displayMemoryUsagePostGC() -{ -// Display memory usage -writeln("\nMemory Usage post GC (bytes)"); -writeln("--------------------"); -writeln("memory usedSize = ", GC.stats.usedSize); -writeln("memory freeSize = ", GC.stats.freeSize); -// uncomment this if required, if not using LDC 1.16 as this does not exist in that version -//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n"); +void displayMemoryUsagePostGC() { + // Display memory usage + writeln(); + writeln("Memory Usage post GC (KB)"); + writeln("-------------------------"); + writeMemoryStats(); + writeln(); } + +void writeMemoryStats() { + // write memory stats + writeln("memory 
usedSize = ", (GC.stats.usedSize/1024)); + writeln("memory freeSize = ", (GC.stats.freeSize/1024)); + writeln("memory allocatedInCurrentThread = ", (GC.stats.allocatedInCurrentThread/1024)); +} \ No newline at end of file diff --git a/src/main.d b/src/main.d index 688cd1d57..9281c835e 100644 --- a/src/main.d +++ b/src/main.d @@ -1,148 +1,129 @@ +// What is this module called? +module main; + +// What does this module require to function? import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; -import core.memory, core.time, core.thread; -import std.getopt, std.file, std.path, std.process, std.stdio, std.conv, std.algorithm.searching, std.string, std.regex; -import config, itemdb, monitor, onedrive, selective, sync, util; -import std.net.curl: CurlException; import core.stdc.signal; -import std.traits, std.format; -import std.concurrency: receiveTimeout; +import core.memory; +import core.time; +import core.thread; +import std.stdio; +import std.getopt; +import std.string; +import std.file; +import std.process; +import std.algorithm; +import std.path; +import std.concurrency; +import std.parallelism; +import std.conv; +import std.traits; +import std.net.curl: CurlException; import std.datetime; -static import log; -OneDriveApi oneDrive; -ItemDatabase itemDb; - -bool onedriveInitialised = false; -const int EXIT_UNAUTHORIZED = 3; +// What other modules that we have created do we need to import? +import config; +import log; +import curlEngine; +import util; +import onedrive; +import syncEngine; +import itemdb; +import clientSideFiltering; +import monitor; + +// What other constant variables do we require? 
const int EXIT_RESYNC_REQUIRED = 126; -enum MONITOR_LOG_SILENT = 2; -enum MONITOR_LOG_QUIET = 1; -enum LOG_NORMAL = 0; +// Class objects +ApplicationConfig appConfig; +OneDriveApi oneDriveApiInstance; +SyncEngine syncEngineInstance; +ItemDatabase itemDB; +ClientSideFiltering selectiveSync; -int main(string[] args) -{ - // Disable buffering on stdout +int main(string[] cliArgs) { + // Disable buffering on stdout - this is needed so that when we are using plain write() it will go to the terminal stdout.setvbuf(0, _IONBF); - - // main function variables - string confdirOption; - string configFilePath; - string syncListFilePath; - string databaseFilePath; - string businessSharedFolderFilePath; - string currentConfigHash; - string currentSyncListHash; - string previousConfigHash; - string previousSyncListHash; - string configHashFile; - string syncListHashFile; - string configBackupFile; - string syncDir; - string logOutputMessage; - string currentBusinessSharedFoldersHash; - string previousBusinessSharedFoldersHash; - string businessSharedFoldersHashFile; - string databaseFilePathDryRunGlobal; - bool configOptionsDifferent = false; - bool businessSharedFoldersDifferent = false; - bool syncListConfigured = false; - bool syncListDifferent = false; - bool syncDirDifferent = false; - bool skipFileDifferent = false; - bool skipDirDifferent = false; + // Required main function variables + string genericHelpMessage = "Try 'onedrive --help' for more information"; + // If the user passes in --confdir we need to store this as a variable + string confdirOption = ""; + // Are we online? bool online = false; - bool performSyncOK = false; + // Does the operating environment have shell environment variables set + bool shellEnvSet = false; + // What is the runtime syncronisation directory that will be used + // Typically this will be '~/OneDrive' .. however tilde expansion is unreliable + string runtimeSyncDirectory = ""; + // Configure the runtime database file path. 
Typically this will be the default, but in a --dry-run scenario, we use a separate database file + string runtimeDatabaseFile = ""; + + // Application Start Time - used during monitor loop to detail how long it has been running for + auto applicationStartTime = Clock.currTime(); + + // DEVELOPER OPTIONS OUTPUT VARIABLES bool displayMemoryUsage = false; bool displaySyncOptions = false; - bool cleanupLocalFilesGlobal = false; - bool synchronizeConfigured = false; - bool invalidSyncExit = false; - - // start and finish messages - string startMessage = "Starting a sync with OneDrive"; - string finishMessage = "Sync with OneDrive is complete"; - string helpMessage = "Please use 'onedrive --help' for further assistance in regards to running this application."; - // hash file permission values - string hashPermissionValue = "600"; - auto convertedPermissionValue = parse!long(hashPermissionValue, 8); - - // Define scopes + // Define 'exit' and 'failure' scopes scope(exit) { // detail what scope was called - log.vdebug("Exit scope called"); - if (synchronizeConfigured) { - log.log(finishMessage); - } - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePreGC(); - } - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - oneDrive.shutdown(); - } - // was itemDb initialised? - if (itemDb !is null) { + log.vdebug("Exit scope was called"); + + // Was itemDB initialised? 
+ if (itemDB !is null) { // Make sure the .wal file is incorporated into the main db before we exit - if(!invalidSyncExit) { - itemDb.performVacuum(); - } - destroy(itemDb); - } - // cleanup any dry-run data - cleanupDryRunDatabase(databaseFilePathDryRunGlobal); - // free API instance - if (oneDrive !is null) { - destroy(oneDrive); + itemDB.performVacuum(); + object.destroy(itemDB); } - // Perform Garbage Cleanup - GC.collect(); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePostGC(); + + // Free other objects and memory + if (appConfig !is null) { + // Cleanup any existing dry-run elements ... these should never be left hanging around + cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun); + object.destroy(appConfig); } + if (oneDriveApiInstance !is null) object.destroy(oneDriveApiInstance); + if (selectiveSync !is null) object.destroy(selectiveSync); + if (syncEngineInstance !is null) object.destroy(syncEngineInstance); } - + scope(failure) { // detail what scope was called - log.vdebug("Failure scope called"); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePreGC(); - } - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - oneDrive.shutdown(); - } - // was itemDb initialised? - if (itemDb !is null) { + log.vdebug("Failure scope was called"); + + // Was itemDB initialised? 
+ if (itemDB !is null) { // Make sure the .wal file is incorporated into the main db before we exit - if(!invalidSyncExit) { - itemDb.performVacuum(); - } - destroy(itemDb); - } - // cleanup any dry-run data - cleanupDryRunDatabase(databaseFilePathDryRunGlobal); - // free API instance - if (oneDrive !is null) { - destroy(oneDrive); - } - // Perform Garbage Cleanup - GC.collect(); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePostGC(); + itemDB.performVacuum(); + object.destroy(itemDB); } + + // Free other objects and memory + if (appConfig !is null) { + // Cleanup any existing dry-run elements ... these should never be left hanging around + cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun); + object.destroy(appConfig); + } + if (oneDriveApiInstance !is null) object.destroy(oneDriveApiInstance); + if (selectiveSync !is null) object.destroy(selectiveSync); + if (syncEngineInstance !is null) object.destroy(syncEngineInstance); + + // Set these to be null due to failure scope - prevent 'ERROR: Unable to perform a database vacuum: out of memory' when the exit scope is then called + log.vdebug("Setting Class Objects to null due to failure scope"); + itemDB = null; + appConfig = null; + oneDriveApiInstance = null; + selectiveSync = null; + syncEngineInstance = null; } - - // read in application options as passed in + + // Read in application options as passed in try { bool printVersion = false; - auto opt = getopt( - args, + auto cliOptions = getopt( + cliArgs, std.getopt.config.passThrough, std.getopt.config.bundling, std.getopt.config.caseSensitive, @@ -151,1302 +132,515 @@ int main(string[] args) "version", "Print the version and exit", &printVersion ); - // print help and exit - if (opt.helpWanted) { - args ~= "--help"; + // Print help and exit + if (cliOptions.helpWanted) { + cliArgs ~= "--help"; } - // print the version and exit + // Print the version and exit if (printVersion) { - writeln("onedrive ", 
strip(import("version"))); + //writeln("onedrive ", strip(import("version"))); + string tempVersion = "v2.5.0-alpha-2" ~ " GitHub version: " ~ strip(import("version")); + writeln(tempVersion); return EXIT_SUCCESS; } } catch (GetOptException e) { - // option errors + // Option errors log.error(e.msg); - log.error("Try 'onedrive --help' for more information"); + log.error(genericHelpMessage); return EXIT_FAILURE; } catch (Exception e) { - // generic error + // Generic error log.error(e.msg); - log.error("Try 'onedrive --help' for more information"); + log.error(genericHelpMessage); return EXIT_FAILURE; } - - // confdirOption must be a directory, not a file - // - By default ~/.config/onedrive will be used - // - If the user is using --confdir , the confdirOption needs to be evaluated when trying to load any file - // load configuration file if available - auto cfg = new config.Config(confdirOption); - if (!cfg.initialize()) { - // There was an error loading the configuration + + // How was this application started - what options were passed in + log.vdebug("passed in options: ", cliArgs); + log.vdebug("note --confdir and --verbose not listed in 'cliArgs'"); + + // Create a new AppConfig object with default values, + appConfig = new ApplicationConfig(); + // Initialise the application configuration, utilising --confdir if it was passed in + // Otherwise application defaults will be used to configure the application + if (!appConfig.initialise(confdirOption)) { + // There was an error loading the user specified application configuration // Error message already printed return EXIT_FAILURE; } - // How was this application started - what options were passed in - log.vdebug("passed in options: ", args); - log.vdebug("note --confdir and --verbose not listed in args"); - - // set memory display - displayMemoryUsage = cfg.getValueBool("display_memory"); - - // set display sync options - displaySyncOptions = cfg.getValueBool("display_sync_options"); - - // update 
configuration from command line args - cfg.update_from_args(args); + // Update the existing application configuration (default or 'config' file) from any passed in command line arguments + appConfig.updateFromArgs(cliArgs); - // --resync should be a 'last resort item' .. the user needs to 'accept' to proceed - if ((cfg.getValueBool("resync")) && (!cfg.getValueBool("display_config"))) { - // what is the risk acceptance? - bool resyncRiskAcceptance = false; + // Depreciated options check now that the config file (if present) and CLI options have all been parsed + appConfig.checkDepreciatedOptions(cliArgs); - if (!cfg.getValueBool("resync_auth")) { - // need to prompt user - char response; - // warning message - writeln("\nThe use of --resync will remove your local 'onedrive' client state, thus no record will exist regarding your current 'sync status'"); - writeln("This has the potential to overwrite local versions of files with potentially older versions downloaded from OneDrive which can lead to data loss"); - writeln("If in-doubt, backup your local data first before proceeding with --resync"); - write("\nAre you sure you wish to proceed with --resync? [Y/N] "); - - try { - // Attempt to read user response - readf(" %c\n", &response); - } catch (std.format.FormatException e) { - // Caught an error - return EXIT_FAILURE; - } - - // Evaluate user repsonse - if ((to!string(response) == "y") || (to!string(response) == "Y")) { - // User has accepted --resync risk to proceed - resyncRiskAcceptance = true; - // Are you sure you wish .. 
does not use writeln(); - write("\n"); - } - } else { - // resync_auth is true - resyncRiskAcceptance = true; - } - - // Action based on response - if (!resyncRiskAcceptance){ - // --resync risk not accepted - return EXIT_FAILURE; - } - } - - // Initialise normalised file paths - configFilePath = buildNormalizedPath(cfg.configDirName ~ "/config"); - syncListFilePath = buildNormalizedPath(cfg.configDirName ~ "/sync_list"); - databaseFilePath = buildNormalizedPath(cfg.configDirName ~ "/items.db"); - businessSharedFolderFilePath = buildNormalizedPath(cfg.configDirName ~ "/business_shared_folders"); - - // Has any of our configuration that would require a --resync been changed? - // 1. sync_list file modification - // 2. config file modification - but only if sync_dir, skip_dir, skip_file or drive_id was modified - // 3. CLI input overriding configured config file option - configHashFile = buildNormalizedPath(cfg.configDirName ~ "/.config.hash"); - syncListHashFile = buildNormalizedPath(cfg.configDirName ~ "/.sync_list.hash"); - configBackupFile = buildNormalizedPath(cfg.configDirName ~ "/.config.backup"); - businessSharedFoldersHashFile = buildNormalizedPath(cfg.configDirName ~ "/.business_shared_folders.hash"); - - // Does a 'config' file exist with a valid hash file - if (exists(configFilePath)) { - if (!exists(configHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(configHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } + // Configure GUI Notifications + // - This option is reverse action. 
If 'disable_notifications' is 'true', we need to send 'false' + if (appConfig.getValueBool("disable_notifications")){ + // disable_notifications is true, initialise with false + log.enableNotifications(false); } else { - // no 'config' file exists, application defaults being used, no hash file required - if (exists(configHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(configHashFile); - safeRemove(configBackupFile); - } - } + log.enableNotifications(true); } - - // Does a 'sync_list' file exist with a valid hash file - if (exists(syncListFilePath)) { - if (!exists(syncListHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(syncListHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - syncListHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } else { - // no 'sync_list' file exists, no hash file required - if (exists(syncListHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(syncListHashFile); - } - } - } - - // Does a 'business_shared_folders' file exist with a valid hash file - if (exists(businessSharedFolderFilePath)) { - if (!exists(businessSharedFoldersHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(businessSharedFoldersHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - businessSharedFoldersHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - 
} else { - // no 'business_shared_folders' file exists, no hash file required - if (exists(businessSharedFoldersHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(businessSharedFoldersHashFile); - } - } - } - - // Generate current hashes for the relevant configuration files if they exist - if (exists(configFilePath)) currentConfigHash = computeQuickXorHash(configFilePath); - if (exists(syncListFilePath)) currentSyncListHash = computeQuickXorHash(syncListFilePath); - if (exists(businessSharedFolderFilePath)) currentBusinessSharedFoldersHash = computeQuickXorHash(businessSharedFolderFilePath); - // read the existing hashes for each of the relevant configuration files if they exist - if (exists(configHashFile)) { - try { - previousConfigHash = readText(configHashFile); - } catch (std.file.FileException e) { - // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - if (exists(syncListHashFile)) { - try { - previousSyncListHash = readText(syncListHashFile); - } catch (std.file.FileException e) { - // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - if (exists(businessSharedFoldersHashFile)) { - try { - previousBusinessSharedFoldersHash = readText(businessSharedFoldersHashFile); - } catch (std.file.FileException e) { - // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // Was sync_list file updated? 
- if (currentSyncListHash != previousSyncListHash) { - // Debugging output to assist what changed - log.vdebug("sync_list file has been updated, --resync needed"); - syncListDifferent = true; + // Configure application logging to a log file only if enabled + // This is the earliest point to do so as the client configuration has been read in, CLI arguments have been processed. + // Either of those could be enabling logging + if (appConfig.getValueBool("enable_logging")){ + // configure the application logging directory + string initialisedLogDirPath = appConfig.initialiseLogDirectory(); + // Initialise using the configured logging directory + log.vlog("Using the following path to store the runtime application log: ", initialisedLogDirPath); + log.initialise(initialisedLogDirPath); } - - // Was business_shared_folders updated? - if (currentBusinessSharedFoldersHash != previousBusinessSharedFoldersHash) { - // Debugging output to assist what changed - log.vdebug("business_shared_folders file has been updated, --resync needed"); - businessSharedFoldersDifferent = true; + + // Configure Client Side Filtering (selective sync) by parsing and getting a usable regex for skip_file, skip_dir and sync_list config components + selectiveSync = new ClientSideFiltering(appConfig); + if (!selectiveSync.initialise()) { + // exit here as something triggered a selective sync configuration failure + return EXIT_FAILURE; } - - // Was config file updated between last execution ang this execution? 
- if (currentConfigHash != previousConfigHash) { - // config file was updated, however we only want to trigger a --resync requirement if sync_dir, skip_dir, skip_file or drive_id was modified - if (!cfg.getValueBool("display_config")){ - // only print this message if we are not using --display-config - log.log("config file has been updated, checking if --resync needed"); - } - if (exists(configBackupFile)) { - // check backup config what has changed for these configuration options if anything - // # sync_dir = "~/OneDrive" - // # skip_file = "~*|.~*|*.tmp" - // # skip_dir = "" - // # drive_id = "" - string[string] stringValues; - stringValues["sync_dir"] = ""; - stringValues["skip_file"] = ""; - stringValues["skip_dir"] = ""; - stringValues["drive_id"] = ""; - auto configBackupFileHandle = File(configBackupFile, "r"); - string lineBuffer; - auto range = configBackupFileHandle.byLine(); - // read configBackupFile line by line - foreach (line; range) { - lineBuffer = stripLeft(line).to!string; - if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; - auto c = lineBuffer.matchFirst(cfg.configRegex); - if (!c.empty) { - c.popFront(); // skip the whole match - string key = c.front.dup; - auto p = key in stringValues; - if (p) { - c.popFront(); - // compare this key - if ((key == "sync_dir") && (c.front.dup != cfg.getValueString("sync_dir"))) { - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - - if ((key == "skip_file") && (c.front.dup != cfg.getValueString("skip_file"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - if ((key == "skip_dir") && (c.front.dup != cfg.getValueString("skip_dir"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - 
if ((key == "drive_id") && (c.front.dup != cfg.getValueString("drive_id"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - } - } - } - // close file if open - if (configBackupFileHandle.isOpen()){ - // close open file - configBackupFileHandle.close(); - } - } else { - // no backup to check - log.vdebug("WARNING: no backup config file was found, unable to validate if any changes made"); - } - - // If there was a backup, any modified values we need to worry about would been detected - if (!cfg.getValueBool("display_config")) { - // we are not testing the configuration - if (!configOptionsDifferent) { - // no options are different - if (!cfg.getValueBool("dry_run")) { - // we are not in a dry-run scenario - // update config hash - log.vdebug("updating config hash as it is out of date"); - std.file.write(configHashFile, computeQuickXorHash(configFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - // create backup copy of current config file - log.vdebug("making backup of config file as it is out of date"); - std.file.copy(configFilePath, configBackupFile); - // File Copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - } + + // Set runtimeDatabaseFile, this will get updated if we are using --dry-run + runtimeDatabaseFile = appConfig.databaseFilePath; + + // Read in 'sync_dir' from appConfig with '~' if present expanded + runtimeSyncDirectory = appConfig.initialiseRuntimeSyncDirectory(); + + // DEVELOPER OPTIONS OUTPUT + // Set to display memory details as early as possible + displayMemoryUsage = appConfig.getValueBool("display_memory"); + // set to display sync options + displaySyncOptions = appConfig.getValueBool("display_sync_options"); + + // 
Display the current application configuration (based on all defaults, 'config' file parsing and/or options passed in via the CLI) and exit if --display-config has been used + if ((appConfig.getValueBool("display_config")) || (appConfig.getValueBool("display_running_config"))) { + // Display the application configuration + appConfig.displayApplicationConfiguration(); + // Do we exit? We exit only if '--display-config' has been used + if (appConfig.getValueBool("display_config")) { + return EXIT_SUCCESS; } } - - // Is there a backup of the config file if the config file exists? - if ((exists(configFilePath)) && (!exists(configBackupFile))) { - // create backup copy of current config file - std.file.copy(configFilePath, configBackupFile); - // File Copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - - // config file set options can be changed via CLI input, specifically these will impact sync and --resync will be needed: - // --syncdir ARG - // --skip-file ARG - // --skip-dir ARG - if (exists(configFilePath)) { - // config file exists - // was the sync_dir updated by CLI? - if (cfg.configFileSyncDir != "") { - // sync_dir was set in config file - if (cfg.configFileSyncDir != cfg.getValueString("sync_dir")) { - // config file was set and CLI input changed this - log.vdebug("sync_dir: CLI override of config file option, --resync needed"); - syncDirDifferent = true; - } - } - - // was the skip_file updated by CLI? - if (cfg.configFileSkipFile != "") { - // skip_file was set in config file - if (cfg.configFileSkipFile != cfg.getValueString("skip_file")) { - // config file was set and CLI input changed this - log.vdebug("skip_file: CLI override of config file option, --resync needed"); - skipFileDifferent = true; - } - } - - // was the skip_dir updated by CLI? 
- if (cfg.configFileSkipDir != "") { - // skip_dir was set in config file - if (cfg.configFileSkipDir != cfg.getValueString("skip_dir")) { - // config file was set and CLI input changed this - log.vdebug("skip_dir: CLI override of config file option, --resync needed"); - skipDirDifferent = true; - } - } + + // Check for basic application option conflicts - flags that should not be used together and/or flag combinations that conflict with each other, values that should be present and are not + if (appConfig.checkForBasicOptionConflicts) { + // Any error will have been printed by the function itself + return EXIT_FAILURE; } - - // Has anything triggered a --resync requirement? - if (configOptionsDifferent || syncListDifferent || syncDirDifferent || skipFileDifferent || skipDirDifferent || businessSharedFoldersDifferent) { - // --resync needed, is the user performing any operation where a --resync is not required? - // flag to ignore --resync requirement - bool ignoreResyncRequirement = false; - // These flags do not need --resync as no sync operation is needed: --display-config, --list-shared-folders, --get-O365-drive-id, --get-file-link - if (cfg.getValueBool("display_config")) ignoreResyncRequirement = true; - if (cfg.getValueBool("list_business_shared_folders")) ignoreResyncRequirement = true; - if ((!cfg.getValueString("get_o365_drive_id").empty)) ignoreResyncRequirement = true; - if ((!cfg.getValueString("get_file_link").empty)) ignoreResyncRequirement = true; + + // Check for --dry-run operation + // If this has been requested, we need to ensure that all actions are performed against the dry-run database copy, and, + // no actual action takes place - such as deleting files if deleted online, moving files if moved online or local, downloading new & changed files, uploading new & changed files + if (appConfig.getValueBool("dry_run")) { + // this is a --dry-run operation + log.log("DRY-RUN Configured. 
Output below shows what 'would' have occurred."); - // Do we need to ignore a --resync requirement? - if (!ignoreResyncRequirement) { - // We are not ignoring --requirement - if (!cfg.getValueBool("resync")) { - // --resync not issued, fail fast - log.error("An application configuration change has been detected where a --resync is required"); - return EXIT_RESYNC_REQUIRED; - } else { - // --resync issued, update hashes of config files if they exist - if (!cfg.getValueBool("dry_run")) { - // not doing a dry run, update hash files if config & sync_list exist - if (exists(configFilePath)) { - // update hash - log.vdebug("updating config hash as --resync issued"); - std.file.write(configHashFile, computeQuickXorHash(configFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - // create backup copy of current config file - log.vdebug("making backup of config file as --resync issued"); - std.file.copy(configFilePath, configBackupFile); - // File copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - if (exists(syncListFilePath)) { - // update sync_list hash - log.vdebug("updating sync_list hash as --resync issued"); - std.file.write(syncListHashFile, computeQuickXorHash(syncListFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - syncListHashFile.setAttributes(to!int(convertedPermissionValue)); - } - if (exists(businessSharedFolderFilePath)) { - // update business_shared_folders hash - log.vdebug("updating business_shared_folders hash as --resync issued"); - std.file.write(businessSharedFoldersHashFile, computeQuickXorHash(businessSharedFolderFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - 
businessSharedFoldersHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } - } - } - - // --dry-run operation notification and database setup - // Are we performing any of the following operations? - // --dry-run, --list-shared-folders, --get-O365-drive-id, --get-file-link - if ((cfg.getValueBool("dry_run")) || (cfg.getValueBool("list_business_shared_folders")) || (!cfg.getValueString("get_o365_drive_id").empty) || (!cfg.getValueString("get_file_link").empty)) { - // is this a --list-shared-folders, --get-O365-drive-id, --get-file-link operation - if (cfg.getValueBool("dry_run")) { - // this is a --dry-run operation - log.log("DRY-RUN Configured. Output below shows what 'would' have occurred."); - } else { - // is this a --list-shared-folders, --get-O365-drive-id, --get-file-link operation - log.log("Using dry-run database copy for OneDrive API query"); - } - // configure databaseFilePathDryRunGlobal - databaseFilePathDryRunGlobal = cfg.databaseFilePathDryRun; + // Cleanup any existing dry-run elements ... 
these should never be left hanging around + cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun); - string dryRunShmFile = databaseFilePathDryRunGlobal ~ "-shm"; - string dryRunWalFile = databaseFilePathDryRunGlobal ~ "-wal"; - // If the dry run database exists, clean this up - if (exists(databaseFilePathDryRunGlobal)) { - // remove the existing file - log.vdebug("Removing items-dryrun.sqlite3 as it still exists for some reason"); - safeRemove(databaseFilePathDryRunGlobal); - } - // silent cleanup of shm and wal files if they exist - if (exists(dryRunShmFile)) { - // remove items-dryrun.sqlite3-shm - safeRemove(dryRunShmFile); - } - if (exists(dryRunWalFile)) { - // remove items-dryrun.sqlite3-wal - safeRemove(dryRunWalFile); - } - // Make a copy of the original items.sqlite3 for use as the dry run copy if it exists - if (exists(cfg.databaseFilePath)) { - // in a --dry-run --resync scenario, we should not copy the existing database file - if (!cfg.getValueBool("resync")) { - // copy the existing DB file to the dry-run copy - log.vdebug("Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations"); - copy(cfg.databaseFilePath,databaseFilePathDryRunGlobal); - } else { - // no database copy due to --resync - log.vdebug("No database copy created for --dry-run due to --resync also being used"); - } - } - } - - // sync_dir environment handling to handle ~ expansion properly - bool shellEnvSet = false; - if ((environment.get("SHELL") == "") && (environment.get("USER") == "")){ - log.vdebug("sync_dir: No SHELL or USER environment variable configuration detected"); - // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker - // Does the 'currently configured' sync_dir include a ~ - if (canFind(cfg.getValueString("sync_dir"), "~")) { - // A ~ was found in sync_dir - log.vdebug("sync_dir: A '~' was found in sync_dir, using the calculated 'homePath' to replace '~' as no SHELL or 
USER environment variable set"); - syncDir = cfg.homePath ~ strip(cfg.getValueString("sync_dir"), "~"); - } else { - // No ~ found in sync_dir, use as is - log.vdebug("sync_dir: Getting syncDir from config value sync_dir"); - syncDir = cfg.getValueString("sync_dir"); - } - } else { - // A shell and user is set, expand any ~ as this will be expanded correctly if present - shellEnvSet = true; - log.vdebug("sync_dir: Getting syncDir from config value sync_dir"); - if (canFind(cfg.getValueString("sync_dir"), "~")) { - log.vdebug("sync_dir: A '~' was found in configured sync_dir, automatically expanding as SHELL and USER environment variable is set"); - syncDir = expandTilde(cfg.getValueString("sync_dir")); - } else { - syncDir = cfg.getValueString("sync_dir"); - } - } - - // vdebug syncDir as set and calculated - log.vdebug("syncDir: ", syncDir); - - // Configure the logging directory if different from application default - // log_dir environment handling to handle ~ expansion properly - string logDir = cfg.getValueString("log_dir"); - if (logDir != cfg.defaultLogFileDir) { - // user modified log_dir entry - // if 'log_dir' contains a '~' this needs to be expanded correctly - if (canFind(cfg.getValueString("log_dir"), "~")) { - // ~ needs to be expanded correctly - if (!shellEnvSet) { - // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker - log.vdebug("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); - logDir = cfg.homePath ~ strip(cfg.getValueString("log_dir"), "~"); + if (exists(appConfig.databaseFilePath)) { + // In a --dry-run --resync scenario, we should not copy the existing database file + if (!appConfig.getValueBool("resync")) { + // Copy the existing DB file to the dry-run copy + log.log("DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations"); + 
copy(appConfig.databaseFilePath,appConfig.databaseFilePathDryRun); } else { - // A shell and user is set, expand any ~ as this will be expanded correctly if present - log.vdebug("log_dir: A '~' was found in log_dir, using SHELL or USER environment variable to expand '~'"); - logDir = expandTilde(cfg.getValueString("log_dir")); + // No database copy due to --resync + log.log("DRY-RUN: No database copy created for --dry-run due to --resync also being used"); } - } else { - // '~' not found in log_dir entry, use as is - logDir = cfg.getValueString("log_dir"); } - // update log_dir with normalised path, with '~' expanded correctly - cfg.setValueString("log_dir", logDir); + // update runtimeDatabaseFile now that we are using the dry run path + runtimeDatabaseFile = appConfig.databaseFilePathDryRun; } - - // Configure logging only if enabled - if (cfg.getValueBool("enable_logging")){ - // Initialise using the configured logging directory - log.vlog("Using logfile dir: ", logDir); - log.init(logDir); - } - - // Configure whether notifications are used - log.setNotifications(cfg.getValueBool("monitor") && !cfg.getValueBool("disable_notifications")); - - // Application upgrades - skilion version etc - if (exists(databaseFilePath)) { - if (!cfg.getValueBool("dry_run")) { - safeRemove(databaseFilePath); - } - log.logAndNotify("Database schema changed, resync needed"); - cfg.setValueBool("resync", true); - } - + // Handle --logout as separate item, do not 'resync' on a --logout - if (cfg.getValueBool("logout")) { + if (appConfig.getValueBool("logout")) { log.vdebug("--logout requested"); log.log("Deleting the saved authentication status ..."); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.refreshTokenFilePath); + if (!appConfig.getValueBool("dry_run")) { + safeRemove(appConfig.refreshTokenFilePath); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... 
+ log.log("DRY RUN: Not removing the saved authentication status"); } // Exit return EXIT_SUCCESS; } // Handle --reauth to re-authenticate the client - if (cfg.getValueBool("reauth")) { + if (appConfig.getValueBool("reauth")) { log.vdebug("--reauth requested"); log.log("Deleting the saved authentication status ... re-authentication requested"); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.refreshTokenFilePath); + if (!appConfig.getValueBool("dry_run")) { + safeRemove(appConfig.refreshTokenFilePath); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... + log.log("DRY RUN: Not removing the saved authentication status"); } } - // Display current application configuration - if ((cfg.getValueBool("display_config")) || (cfg.getValueBool("display_running_config"))) { - if (cfg.getValueBool("display_running_config")) { - writeln("--------------- Application Runtime Configuration ---------------"); - } - - // Display application version - writeln("onedrive version = ", strip(import("version"))); - // Display all of the pertinent configuration options - writeln("Config path = ", cfg.configDirName); - // Does a config file exist or are we using application defaults - writeln("Config file found in config path = ", exists(configFilePath)); - - // Is config option drive_id configured? 
- if (cfg.getValueString("drive_id") != ""){ - writeln("Config option 'drive_id' = ", cfg.getValueString("drive_id")); - } - - // Config Options as per 'config' file - writeln("Config option 'sync_dir' = ", syncDir); - - // logging and notifications - writeln("Config option 'enable_logging' = ", cfg.getValueBool("enable_logging")); - writeln("Config option 'log_dir' = ", cfg.getValueString("log_dir")); - writeln("Config option 'disable_notifications' = ", cfg.getValueBool("disable_notifications")); - writeln("Config option 'min_notify_changes' = ", cfg.getValueLong("min_notify_changes")); - - // skip files and directory and 'matching' policy - writeln("Config option 'skip_dir' = ", cfg.getValueString("skip_dir")); - writeln("Config option 'skip_dir_strict_match' = ", cfg.getValueBool("skip_dir_strict_match")); - writeln("Config option 'skip_file' = ", cfg.getValueString("skip_file")); - writeln("Config option 'skip_dotfiles' = ", cfg.getValueBool("skip_dotfiles")); - writeln("Config option 'skip_symlinks' = ", cfg.getValueBool("skip_symlinks")); - - // --monitor sync process options - writeln("Config option 'monitor_interval' = ", cfg.getValueLong("monitor_interval")); - writeln("Config option 'monitor_log_frequency' = ", cfg.getValueLong("monitor_log_frequency")); - writeln("Config option 'monitor_fullscan_frequency' = ", cfg.getValueLong("monitor_fullscan_frequency")); - - // sync process and method - writeln("Config option 'read_only_auth_scope' = ", cfg.getValueBool("read_only_auth_scope")); - writeln("Config option 'dry_run' = ", cfg.getValueBool("dry_run")); - writeln("Config option 'upload_only' = ", cfg.getValueBool("upload_only")); - writeln("Config option 'download_only' = ", cfg.getValueBool("download_only")); - writeln("Config option 'local_first' = ", cfg.getValueBool("local_first")); - writeln("Config option 'check_nosync' = ", cfg.getValueBool("check_nosync")); - writeln("Config option 'check_nomount' = ", cfg.getValueBool("check_nomount")); - 
writeln("Config option 'resync' = ", cfg.getValueBool("resync")); - writeln("Config option 'resync_auth' = ", cfg.getValueBool("resync_auth")); - writeln("Config option 'cleanup_local_files' = ", cfg.getValueBool("cleanup_local_files")); - - // data integrity - writeln("Config option 'classify_as_big_delete' = ", cfg.getValueLong("classify_as_big_delete")); - writeln("Config option 'disable_upload_validation' = ", cfg.getValueBool("disable_upload_validation")); - writeln("Config option 'bypass_data_preservation' = ", cfg.getValueBool("bypass_data_preservation")); - writeln("Config option 'no_remote_delete' = ", cfg.getValueBool("no_remote_delete")); - writeln("Config option 'remove_source_files' = ", cfg.getValueBool("remove_source_files")); - writeln("Config option 'sync_dir_permissions' = ", cfg.getValueLong("sync_dir_permissions")); - writeln("Config option 'sync_file_permissions' = ", cfg.getValueLong("sync_file_permissions")); - writeln("Config option 'space_reservation' = ", cfg.getValueLong("space_reservation")); - - // curl operations - writeln("Config option 'application_id' = ", cfg.getValueString("application_id")); - writeln("Config option 'azure_ad_endpoint' = ", cfg.getValueString("azure_ad_endpoint")); - writeln("Config option 'azure_tenant_id' = ", cfg.getValueString("azure_tenant_id")); - writeln("Config option 'user_agent' = ", cfg.getValueString("user_agent")); - writeln("Config option 'force_http_11' = ", cfg.getValueBool("force_http_11")); - writeln("Config option 'debug_https' = ", cfg.getValueBool("debug_https")); - writeln("Config option 'rate_limit' = ", cfg.getValueLong("rate_limit")); - writeln("Config option 'operation_timeout' = ", cfg.getValueLong("operation_timeout")); - writeln("Config option 'dns_timeout' = ", cfg.getValueLong("dns_timeout")); - writeln("Config option 'connect_timeout' = ", cfg.getValueLong("connect_timeout")); - writeln("Config option 'data_timeout' = ", cfg.getValueLong("data_timeout")); - writeln("Config option 
'ip_protocol_version' = ", cfg.getValueLong("ip_protocol_version")); - - // Is sync_list configured ? - writeln("Config option 'sync_root_files' = ", cfg.getValueBool("sync_root_files")); - if (exists(syncListFilePath)){ - - writeln("Selective sync 'sync_list' configured = true"); - writeln("sync_list contents:"); - // Output the sync_list contents - auto syncListFile = File(syncListFilePath, "r"); - auto range = syncListFile.byLine(); - foreach (line; range) - { - writeln(line); - } + // --resync should be considered a 'last resort item' or if the application configuration has changed, where a resync is needed .. the user needs to 'accept' this warning to proceed + // If --resync has not been used (bool value is false), check the application configuration for 'changes' that require a --resync to ensure that the data locally reflects the users requested configuration + if (appConfig.getValueBool("resync")) { + // what is the risk acceptance for --resync? + bool resyncRiskAcceptance = appConfig.displayResyncRiskForAcceptance(); + log.vdebug("Returned --resync risk acceptance: ", resyncRiskAcceptance); + // Action based on user response + if (!resyncRiskAcceptance){ + // --resync risk not accepted + return EXIT_FAILURE; } else { - writeln("Selective sync 'sync_list' configured = false"); - + log.vdebug("--resync issued and risk accepted"); + // --resync risk accepted, perform a cleanup of items that require a cleanup + appConfig.cleanupHashFilesDueToResync(); + // Make a backup of the applicable configuration file + appConfig.createBackupConfigFile(); + // Update hash files and generate a new config backup + appConfig.updateHashContentsForConfigFiles(); + // Remove the items database + processResyncDatabaseRemoval(runtimeDatabaseFile); } - - // Is business_shared_folders enabled and configured ? 
- writeln("Config option 'sync_business_shared_folders' = ", cfg.getValueBool("sync_business_shared_folders")); - if (exists(businessSharedFolderFilePath)){ - writeln("Business Shared Folders configured = true"); - writeln("business_shared_folders contents:"); - // Output the business_shared_folders contents - auto businessSharedFolderFileList = File(businessSharedFolderFilePath, "r"); - auto range = businessSharedFolderFileList.byLine(); - foreach (line; range) - { - writeln(line); - } + } else { + // Has any of our application configuration that would require a --resync been changed? + if (appConfig.applicationChangeWhereResyncRequired()) { + // Application configuration has changed however --resync not issued, fail fast + log.error("\nAn application configuration change has been detected where a --resync is required\n"); + return EXIT_RESYNC_REQUIRED; } else { - writeln("Business Shared Folders configured = false"); - } - - // Are webhooks enabled? - writeln("Config option 'webhook_enabled' = ", cfg.getValueBool("webhook_enabled")); - if (cfg.getValueBool("webhook_enabled")) { - writeln("Config option 'webhook_public_url' = ", cfg.getValueString("webhook_public_url")); - writeln("Config option 'webhook_listening_host' = ", cfg.getValueString("webhook_listening_host")); - writeln("Config option 'webhook_listening_port' = ", cfg.getValueLong("webhook_listening_port")); - writeln("Config option 'webhook_expiration_interval' = ", cfg.getValueLong("webhook_expiration_interval")); - writeln("Config option 'webhook_renewal_interval' = ", cfg.getValueLong("webhook_renewal_interval")); - } - - if (cfg.getValueBool("display_running_config")) { - writeln("-----------------------------------------------------------------"); - } - - // Do we exit? 
We only exit if --display-config has been used - if (cfg.getValueBool("display_config")) { - return EXIT_SUCCESS; + // No configuration change that requires a --resync to be issued + // Make a backup of the applicable configuration file + appConfig.createBackupConfigFile(); + // Update hash files and generate a new config backup + appConfig.updateHashContentsForConfigFiles(); } } - - // --upload-only and --download-only are mutually exclusive and cannot be used together - if ((cfg.getValueBool("upload_only")) && (cfg.getValueBool("download_only"))) { - // both cannot be true at the same time - writeln("ERROR: --upload-only and --download-only are mutually exclusive and cannot be used together.\n"); - return EXIT_FAILURE; - } - - // Handle the actual --resync to remove local files - if (cfg.getValueBool("resync")) { - log.vdebug("--resync requested"); - log.vdebug("Testing if we have exclusive access to local database file"); - // Are we the only running instance? Test that we can open the database file path - itemDb = new ItemDatabase(cfg.databaseFilePath); - - // did we successfully initialise the database class? - if (!itemDb.isDatabaseInitialised()) { - // no .. 
destroy class - itemDb = null; - // exit application + + // Implement https://github.com/abraunegg/onedrive/issues/1129 + // Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignoring all non-default skip_dir and skip_file rules + if (appConfig.getValueBool("force_sync")) { + // appConfig.checkForBasicOptionConflicts() has already checked for the basic requirements for --force-sync + log.log("\nWARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synch --single-directory --force-sync being used"); + bool forceSyncRiskAcceptance = appConfig.displayForceSyncRiskForAcceptance(); + log.vdebug("Returned --force-sync risk acceptance: ", forceSyncRiskAcceptance); + // Action based on user response + if (!forceSyncRiskAcceptance){ + // --force-sync risk not accepted return EXIT_FAILURE; - } - - // If we have exclusive access we will not have exited - // destroy access test - destroy(itemDb); - // delete application sync state - log.log("Deleting the saved application sync status ..."); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.databaseFilePath); - safeRemove(cfg.deltaLinkFilePath); - safeRemove(cfg.uploadStateFilePath); + } else { + // --force-sync risk accepted + // reset set config using function to use application defaults + appConfig.resetSkipToDefaults(); + // update sync engine regex with reset defaults + selectiveSync.setDirMask(appConfig.getValueString("skip_dir")); + selectiveSync.setFileMask(appConfig.getValueString("skip_file")); } } // Test if OneDrive service can be reached, exit if it cant be reached log.vdebug("Testing network to ensure network connectivity to Microsoft OneDrive Service"); - online = testNetwork(cfg); + online = testInternetReachability(appConfig); + + // If we are not 'online' - how do we handle this situation? 
if (!online) { - // Cant initialise the API as we are not online - if (!cfg.getValueBool("monitor")) { + // We are unable to initialise the OneDrive API as we are not online + if (!appConfig.getValueBool("monitor")) { // Running as --synchronize - log.error("Unable to reach Microsoft OneDrive API service, unable to initialize application\n"); + log.error("Unable to reach Microsoft OneDrive API service, unable to initialise application\n"); return EXIT_FAILURE; } else { // Running as --monitor - log.error("Unable to reach Microsoft OneDrive API service at this point in time, re-trying network tests\n"); - // re-try network connection to OneDrive - // https://github.com/abraunegg/onedrive/issues/1184 - // Back off & retry with incremental delay - int retryCount = 10000; - int retryAttempts = 1; - int backoffInterval = 1; - int maxBackoffInterval = 3600; - - bool retrySuccess = false; - while (!retrySuccess){ - // retry to access OneDrive API - backoffInterval++; - int thisBackOffInterval = retryAttempts*backoffInterval; - log.vdebug(" Retry Attempt: ", retryAttempts); - if (thisBackOffInterval <= maxBackoffInterval) { - log.vdebug(" Retry In (seconds): ", thisBackOffInterval); - Thread.sleep(dur!"seconds"(thisBackOffInterval)); - } else { - log.vdebug(" Retry In (seconds): ", maxBackoffInterval); - Thread.sleep(dur!"seconds"(maxBackoffInterval)); - } - // perform the re-rty - online = testNetwork(cfg); - if (online) { - // We are now online - log.log("Internet connectivity to Microsoft OneDrive service has been restored"); - retrySuccess = true; - } else { - // We are still offline - if (retryAttempts == retryCount) { - // we have attempted to re-connect X number of times - // false set this to true to break out of while loop - retrySuccess = true; - } - } - // Increment & loop around - retryAttempts++; - } - if (!online) { - // Not online after 1.2 years of trying - log.error("ERROR: Was unable to reconnect to the Microsoft OneDrive service after 10000 attempts 
lasting over 1.2 years!"); + log.error("Unable to reach the Microsoft OneDrive API service at this point in time, re-trying network tests based on applicable intervals\n"); + if (!retryInternetConnectivtyTest(appConfig)) { return EXIT_FAILURE; } } } - // Check application version and Initialize OneDrive API, check for authorization + // This needs to be a separate 'if' statement, as, if this was an 'if-else' from above, if we were originally offline and using --monitor, we would never get to this point if (online) { // Check Application Version log.vlog("Checking Application Version ..."); checkApplicationVersion(); - - // we can only initialise if we are online - log.vlog("Initializing the OneDrive API ..."); - oneDrive = new OneDriveApi(cfg); - onedriveInitialised = oneDrive.init(); - oneDrive.printAccessToken = cfg.getValueBool("print_token"); - } - - if (!onedriveInitialised) { - log.error("Could not initialize the OneDrive API"); - // Use exit scopes to shutdown API - return EXIT_UNAUTHORIZED; - } - - // if --synchronize or --monitor not passed in, configure the flag to display help & exit - if (cfg.getValueBool("synchronize") || cfg.getValueBool("monitor")) { - performSyncOK = true; - } - - // --source-directory must only be used with --destination-directory - // neither can (or should) be added individually as they have a no operational impact if they are - if (((cfg.getValueString("source_directory") == "") && (cfg.getValueString("destination_directory") != "")) || ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") == ""))) { - // so either --source-directory or --destination-directory was passed in, without the other required item being passed in - // --source-directory or --destination-directory cannot be used with --synchronize or --monitor - writeln(); - if (performSyncOK) { - // log an error - log.error("ERROR: --source-directory or --destination-directory cannot be used with --synchronize or --monitor"); - 
} else { - // display issue with using these options - string emptyParameter; - string dataParameter; - if (cfg.getValueString("source_directory").empty) { - emptyParameter = "--source-directory"; - dataParameter = "--destination-directory"; - } else { - emptyParameter = "--destination-directory"; - dataParameter = "--source-directory"; - } - log.error("ERROR: " ~ dataParameter ~ " was passed in without also using " ~ emptyParameter); - } - // Use exit scopes to shutdown API - writeln(); - log.error(helpMessage); - writeln(); - return EXIT_FAILURE; - } - - // --create-directory, --remove-directory, --source-directory, --destination-directory - // these are activities that dont perform a sync, so to not generate an error message for these items either - if (((cfg.getValueString("create_directory") != "") || (cfg.getValueString("remove_directory") != "")) || ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") != "")) || (cfg.getValueString("get_file_link") != "") || (cfg.getValueString("modified_by") != "") || (cfg.getValueString("create_share_link") != "") || (cfg.getValueString("get_o365_drive_id") != "") || cfg.getValueBool("display_sync_status") || cfg.getValueBool("list_business_shared_folders")) { - performSyncOK = true; - } - - // Were acceptable sync operations provided? Was --synchronize or --monitor passed in - if (!performSyncOK) { - // was the application just authorised? - if (cfg.applicationAuthorizeResponseUri) { - // Application was just authorised - if (exists(cfg.refreshTokenFilePath)) { - // OneDrive refresh token exists - log.log("\nApplication has been successfully authorised, however no additional command switches were provided.\n"); - log.log(helpMessage); - writeln(); - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } else { - // we just authorised, but refresh_token does not exist .. probably an auth error - log.log("\nApplication has not been successfully authorised. 
Please check your URI response entry and try again.\n"); + // Initialise the OneDrive API + log.vlog("Attempting to initialise the OneDrive API ..."); + oneDriveApiInstance = new OneDriveApi(appConfig); + appConfig.apiWasInitialised = oneDriveApiInstance.initialise(); + if (appConfig.apiWasInitialised) { + log.vlog("The OneDrive API was initialised successfully"); + // Flag that we were able to initalise the API in the application config + oneDriveApiInstance.debugOutputConfiguredAPIItems(); + + // Need to configure the itemDB and syncEngineInstance for 'sync' and 'non-sync' operations + log.vlog("Opening the item database ..."); + // Configure the Item Database + itemDB = new ItemDatabase(runtimeDatabaseFile); + // Was the database successfully initialised? + if (!itemDB.isDatabaseInitialised()) { + // no .. destroy class + itemDB = null; + // exit application return EXIT_FAILURE; } - } else { - // Application was not just authorised - log.log("\n--synchronize or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance.\n"); - log.log("No OneDrive sync will be performed without one of these two arguments being present.\n"); - // Use exit scopes to shutdown API - invalidSyncExit = true; - return EXIT_FAILURE; - } - } - - // if --synchronize && --monitor passed in, exit & display help as these conflict with each other - if (cfg.getValueBool("synchronize") && cfg.getValueBool("monitor")) { - writeln(); - log.error("ERROR: --synchronize and --monitor cannot be used together"); - writeln(); - log.error(helpMessage); - writeln(); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - - // Initialize the item database - log.vlog("Opening the item database ..."); - // Are we performing any of the following operations? 
- // --dry-run, --list-shared-folders, --get-O365-drive-id, --get-file-link - if ((cfg.getValueBool("dry_run")) || (cfg.getValueBool("list_business_shared_folders")) || (!cfg.getValueString("get_o365_drive_id").empty) || (!cfg.getValueString("get_file_link").empty)) { - // Load the items-dryrun.sqlite3 file as the database - log.vdebug("Using database file: ", asNormalizedPath(databaseFilePathDryRunGlobal)); - itemDb = new ItemDatabase(databaseFilePathDryRunGlobal); - } else { - // Not a dry-run scenario or trying to query O365 Library - should be the default scenario - // Load the items.sqlite3 file as the database - log.vdebug("Using database file: ", asNormalizedPath(cfg.databaseFilePath)); - itemDb = new ItemDatabase(cfg.databaseFilePath); - } - - // did we successfully initialise the database class? - if (!itemDb.isDatabaseInitialised()) { - // no .. destroy class - itemDb = null; - // exit application - return EXIT_FAILURE; - } - - // What are the permission that have been set for the application? - // These are relevant for: - // - The ~/OneDrive parent folder or 'sync_dir' configured item - // - Any new folder created under ~/OneDrive or 'sync_dir' - // - Any new file created under ~/OneDrive or 'sync_dir' - // valid permissions are 000 -> 777 - anything else is invalid - if ((cfg.getValueLong("sync_dir_permissions") < 0) || (cfg.getValueLong("sync_file_permissions") < 0) || (cfg.getValueLong("sync_dir_permissions") > 777) || (cfg.getValueLong("sync_file_permissions") > 777)) { - log.error("ERROR: Invalid 'User|Group|Other' permissions set within config file. 
Please check."); - return EXIT_FAILURE; - } else { - // debug log output what permissions are being set to - log.vdebug("Configuring default new folder permissions as: ", cfg.getValueLong("sync_dir_permissions")); - cfg.configureRequiredDirectoryPermisions(); - log.vdebug("Configuring default new file permissions as: ", cfg.getValueLong("sync_file_permissions")); - cfg.configureRequiredFilePermisions(); - } - - // configure the sync direcory based on syncDir config option - log.vlog("All operations will be performed in: ", syncDir); - try { - if (!exists(syncDir)) { - log.vdebug("syncDir: Configured syncDir is missing. Creating: ", syncDir); - try { - // Attempt to create the sync dir we have been configured with - mkdirRecurse(syncDir); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", syncDir); - syncDir.setAttributes(cfg.returnRequiredDirectoryPermisions()); - } catch (std.file.FileException e) { - // Creating the sync directory failed - log.error("ERROR: Unable to create local OneDrive syncDir - ", e.msg); + + // Initialise the syncEngine + syncEngineInstance = new SyncEngine(appConfig, itemDB, selectiveSync); + appConfig.syncEngineWasInitialised = syncEngineInstance.initialise(); + + // Are we not doing a --sync or a --monitor operation? Both of these will be false if they are not set + if ((!appConfig.getValueBool("synchronize")) && (!appConfig.getValueBool("monitor"))) { + + // Are we performing some sort of 'no-sync' task? + // - Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library? + // - Are we displaying the sync satus? + // - Are we getting the URL for a file online + // - Are we listing who modified a file last online + // - Are we createing a shareable link for an existing file on OneDrive? + // - Are we just creating a directory online, without any sync being performed? + // - Are we just deleting a directory online, without any sync being performed? 
+ // - Are we renaming or moving a directory? + + // --get-sharepoint-drive-id - Get the SharePoint Library drive_id + if (appConfig.getValueString("sharepoint_library_name") != "") { + // Get the SharePoint Library drive_id + syncEngineInstance.querySiteCollectionForDriveID(appConfig.getValueString("sharepoint_library_name")); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --display-sync-status - Query the sync status + if (appConfig.getValueBool("display_sync_status")) { + // path to query variable + string pathToQueryStatusOn; + // What path do we query? + if (!appConfig.getValueString("single_directory").empty) { + pathToQueryStatusOn = "/" ~ appConfig.getValueString("single_directory"); + } else { + pathToQueryStatusOn = "/"; + } + // Query the sync status + syncEngineInstance.queryOneDriveForSyncStatus(pathToQueryStatusOn); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --get-file-link - Get the URL path for a synced file? + if (appConfig.getValueString("get_file_link") != "") { + // Query the OneDrive API for the file link + syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("get_file_link"), runtimeSyncDirectory, "URL"); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --modified-by - Are we listing the modified-by details of a provided path? + if (appConfig.getValueString("modified_by") != "") { + // Query the OneDrive API for the last modified by details + syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("modified_by"), runtimeSyncDirectory, "ModifiedBy"); + // Exit application + // Use exit scopes to shutdown API and cleanup data + return EXIT_SUCCESS; + } + + // --create-share-link - Are we createing a shareable link for an existing file on OneDrive? 
+ if (appConfig.getValueString("create_share_link") != "") { + // Query OneDrive for the file, and if valid, create a shareable link for the file + + // By default, the shareable link will be read-only. + // If the user adds: + // --with-editing-perms + // this will create a writeable link + syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("create_share_link"), runtimeSyncDirectory, "ShareableLink"); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // --create-directory - Are we just creating a directory online, without any sync being performed? + if ((appConfig.getValueString("create_directory") != "")) { + // Handle the remote path creation and updating of the local database without performing a sync + syncEngineInstance.createDirectoryOnline(appConfig.getValueString("create_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // --remove-directory - Are we just deleting a directory online, without any sync being performed? + if ((appConfig.getValueString("remove_directory") != "")) { + // Handle the remote path deletion without performing a sync + syncEngineInstance.deleteByPath(appConfig.getValueString("remove_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // Are we renaming or moving a directory? + // onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination' + if ((appConfig.getValueString("source_directory") != "") && (appConfig.getValueString("destination_directory") != "")) { + // We are renaming or moving a directory + syncEngineInstance.uploadMoveItem(appConfig.getValueString("source_directory"), appConfig.getValueString("destination_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // If we get to this point, we have not performed a 'no-sync' task .. 
+ log.error("\nYour command line input is missing either the '--sync' or '--monitor' switches. Please include one (but not both) of these switches in your command line, or refer to 'onedrive --help' for additional guidance.\n"); + log.error("It is important to note that you must include one of these two arguments in your command line for the application to perform a synchronisation with Microsoft OneDrive\n"); // Use exit scopes to shutdown API + // invalidSyncExit = true; return EXIT_FAILURE; } + // We do not need this instance, as the API was initialised, and individual instances are used during sync process + oneDriveApiInstance.shutdown(); + } else { + // API could not be initialised + log.error("The OneDrive API could not be initialised"); + return EXIT_FAILURE; } - } catch (std.file.FileException e) { - // Creating the sync directory failed - log.error("ERROR: Unable to test the configured OneDrive syncDir - ", e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - - // Change the working directory to the 'sync_dir' configured item - chdir(syncDir); - - // Configure selective sync by parsing and getting a regex for skip_file config component - auto selectiveSync = new SelectiveSync(); - - // load sync_list if it exists - if (exists(syncListFilePath)){ - log.vdebug("Loading user configured sync_list file ..."); - syncListConfigured = true; - // list what will be synced - auto syncListFile = File(syncListFilePath, "r"); - auto range = syncListFile.byLine(); - foreach (line; range) - { - log.vdebug("sync_list: ", line); - } - // close syncListFile if open - if (syncListFile.isOpen()){ - // close open file - syncListFile.close(); - } - } - selectiveSync.load(syncListFilePath); - - // load business_shared_folders if it exists - if (exists(businessSharedFolderFilePath)){ - log.vdebug("Loading user configured business_shared_folders file ..."); - // list what will be synced - auto businessSharedFolderFileList = File(businessSharedFolderFilePath, 
"r"); - auto range = businessSharedFolderFileList.byLine(); - foreach (line; range) - { - log.vdebug("business_shared_folders: ", line); - } - } - selectiveSync.loadSharedFolders(businessSharedFolderFilePath); - - // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries - // Handle skip_dir configuration in config file - log.vdebug("Configuring skip_dir ..."); - log.vdebug("skip_dir: ", cfg.getValueString("skip_dir")); - selectiveSync.setDirMask(cfg.getValueString("skip_dir")); - - // Was --skip-dir-strict-match configured? - log.vdebug("Configuring skip_dir_strict_match ..."); - log.vdebug("skip_dir_strict_match: ", cfg.getValueBool("skip_dir_strict_match")); - if (cfg.getValueBool("skip_dir_strict_match")) { - selectiveSync.setSkipDirStrictMatch(); - } - - // Was --skip-dot-files configured? - log.vdebug("Configuring skip_dotfiles ..."); - log.vdebug("skip_dotfiles: ", cfg.getValueBool("skip_dotfiles")); - if (cfg.getValueBool("skip_dotfiles")) { - selectiveSync.setSkipDotfiles(); - } - - // Handle skip_file configuration in config file - log.vdebug("Configuring skip_file ..."); - // Validate skip_file to ensure that this does not contain an invalid configuration - // Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process. 
- foreach(entry; cfg.getValueString("skip_file").split("|")){ - if (entry == ".*") { - // invalid entry element detected - log.logAndNotify("ERROR: Invalid skip_file entry '.*' detected"); - return EXIT_FAILURE; - } - } - // All skip_file entries are valid - log.vdebug("skip_file: ", cfg.getValueString("skip_file")); - selectiveSync.setFileMask(cfg.getValueString("skip_file")); - - // Implement https://github.com/abraunegg/onedrive/issues/1129 - // Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignoring all non-default skip_dir and skip_file rules - if ((cfg.getValueBool("synchronize")) && (cfg.getValueString("single_directory") != "") && (cfg.getValueBool("force_sync"))) { - log.log("\nWARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synchronize --single-directory --force-sync being used"); - // performing this action could have undesirable effects .. the user must accept this risk - // what is the risk acceptance? - bool resyncRiskAcceptance = false; - - // need to prompt user - char response; - // warning message - writeln("\nThe use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts."); - writeln("By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync."); - write("\nAre you sure you wish to proceed with --force-sync [Y/N] "); - - try { - // Attempt to read user response - readf(" %c\n", &response); - } catch (std.format.FormatException e) { - // Caught an error - return EXIT_FAILURE; - } - - // Evaluate user repsonse - if ((to!string(response) == "y") || (to!string(response) == "Y")) { - // User has accepted --force-sync risk to proceed - resyncRiskAcceptance = true; - // Are you sure you wish .. 
does not use writeln(); - write("\n"); - } - - // Action based on response - if (!resyncRiskAcceptance){ - // --force-sync not accepted - return EXIT_FAILURE; - } else { - // --force-sync risk accepted - // reset set config using function to use application defaults - cfg.resetSkipToDefaults(); - // update sync engine regex with reset defaults - selectiveSync.setDirMask(cfg.getValueString("skip_dir")); - selectiveSync.setFileMask(cfg.getValueString("skip_file")); - } - } - - // Initialize the sync engine - auto sync = new SyncEngine(cfg, oneDrive, itemDb, selectiveSync); - try { - if (!initSyncEngine(sync)) { - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } else { - if ((cfg.getValueString("get_file_link") == "") && (cfg.getValueString("create_share_link") == "")) { - // Print out that we are initializing the engine only if we are not grabbing the file link or creating a shareable link - log.logAndNotify("Initializing the Synchronization Engine ..."); - } - } - } catch (CurlException e) { - if (!cfg.getValueBool("monitor")) { - log.log("\nNo Internet connection."); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // if sync list is configured, set to true now that the sync engine is initialised - if (syncListConfigured) { - sync.setSyncListConfigured(); - } - - // Do we need to configure specific --upload-only options? 
- if (cfg.getValueBool("upload_only")) { - // --upload-only was passed in or configured - log.vdebug("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured"); - sync.setUploadOnly(); - // was --no-remote-delete passed in or configured - if (cfg.getValueBool("no_remote_delete")) { - // Configure the noRemoteDelete flag - log.vdebug("Configuring noRemoteDelete flag to TRUE as --no-remote-delete passed in or configured"); - sync.setNoRemoteDelete(); - } - // was --remove-source-files passed in or configured - if (cfg.getValueBool("remove_source_files")) { - // Configure the localDeleteAfterUpload flag - log.vdebug("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured"); - sync.setLocalDeleteAfterUpload(); - } - } - - // Do we configure to disable the upload validation routine - if (cfg.getValueBool("disable_upload_validation")) sync.setDisableUploadValidation(); - - // Do we configure to disable the download validation routine - if (cfg.getValueBool("disable_download_validation")) sync.setDisableDownloadValidation(); - - // Has the user enabled to bypass data preservation of renaming local files when there is a conflict? - if (cfg.getValueBool("bypass_data_preservation")) { - log.log("WARNING: Application has been configured to bypass local data preservation in the event of file conflict."); - log.log("WARNING: Local data loss MAY occur in this scenario."); - sync.setBypassDataPreservation(); } - // Do we configure to clean up local files if using --download-only ? 
- if ((cfg.getValueBool("download_only")) && (cfg.getValueBool("cleanup_local_files"))) { - // --download-only and --cleanup-local-files were passed in - log.log("WARNING: Application has been configured to cleanup local files that are not present online."); - log.log("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally."); - sync.setCleanupLocalFiles(); - // Set the global flag as we will use this as thhe item to be passed into the sync function below - cleanupLocalFilesGlobal = true; - } - - // Are we configured to use a National Cloud Deployment - if (cfg.getValueString("azure_ad_endpoint") != "") { - // value is configured, is it a valid value? - if ((cfg.getValueString("azure_ad_endpoint") == "USL4") || (cfg.getValueString("azure_ad_endpoint") == "USL5") || (cfg.getValueString("azure_ad_endpoint") == "DE") || (cfg.getValueString("azure_ad_endpoint") == "CN")) { - // valid entries to flag we are using a National Cloud Deployment - // National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Flag that we have a valid National Cloud Deployment that cannot use /delta queries - sync.setNationalCloudDeployment(); + // Configure the sync direcory based on the runtimeSyncDirectory configured directory + log.vlog("All application operations will be performed in the configured local 'sync_dir' directory: ", runtimeSyncDirectory); + try { + if (!exists(runtimeSyncDirectory)) { + log.vdebug("runtimeSyncDirectory: Configured 'sync_dir' is missing locally. 
Creating: ", runtimeSyncDirectory); + try { + // Attempt to create the sync dir we have been configured with + mkdirRecurse(runtimeSyncDirectory); + // Configure the applicable permissions for the folder + log.vdebug("Setting directory permissions for: ", runtimeSyncDirectory); + runtimeSyncDirectory.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + } catch (std.file.FileException e) { + // Creating the sync directory failed + log.error("ERROR: Unable to create the configured local 'sync_dir' directory: ", e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } } + } catch (std.file.FileException e) { + // Creating the sync directory failed + log.error("ERROR: Unable to test for the existence of the configured local 'sync_dir' directory: ", e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; } - // Are we forcing to use /children scan instead of /delta to simulate National Cloud Deployment use of /children? - if (cfg.getValueBool("force_children_scan")) { - log.log("Forcing client to use /children scan rather than /delta to simulate National Cloud Deployment use of /children"); - sync.setNationalCloudDeployment(); - } + // Change the working directory to the 'sync_dir' as configured + chdir(runtimeSyncDirectory); - // Do we need to display the function processing timing - if (cfg.getValueBool("display_processing_time")) { - log.log("Forcing client to display function processing times"); - sync.setPerformanceProcessingOutput(); - } - - // Do we need to validate the syncDir to check for the presence of a '.nosync' file - if (cfg.getValueBool("check_nomount")) { - // we were asked to check the mounts - if (exists(syncDir ~ "/.nosync")) { - log.logAndNotify("ERROR: .nosync file found. Aborting synchronization process to safeguard data."); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // Do we need to create or remove a directory? 
- if ((cfg.getValueString("create_directory") != "") || (cfg.getValueString("remove_directory") != "")) { - // create directory - if (cfg.getValueString("create_directory") != "") { - // create a directory on OneDrive - sync.createDirectoryNoSync(cfg.getValueString("create_directory")); - } - //remove directory - if (cfg.getValueString("remove_directory") != "") { - // remove a directory on OneDrive - sync.deleteDirectoryNoSync(cfg.getValueString("remove_directory")); - } - } - - // Are we renaming or moving a directory? - if ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") != "")) { - // We are renaming or moving a directory - sync.renameDirectoryNoSync(cfg.getValueString("source_directory"), cfg.getValueString("destination_directory")); - } - - // Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library? - if (cfg.getValueString("get_o365_drive_id") != "") { - sync.querySiteCollectionForDriveID(cfg.getValueString("get_o365_drive_id")); - // Exit application - // Use exit scopes to shutdown API and cleanup data - return EXIT_SUCCESS; - } - - // --create-share-link - Are we createing a shareable link for an existing file on OneDrive? - if (cfg.getValueString("create_share_link") != "") { - // Query OneDrive for the file, and if valid, create a shareable link for the file - - // By default, the shareable link will be read-only. - // If the user adds: - // --with-editing-perms - // this will create a writeable link - bool writeablePermissions = cfg.getValueBool("with_editing_perms"); - sync.createShareableLinkForFile(cfg.getValueString("create_share_link"), writeablePermissions); - - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // --get-file-link - Are we obtaining the URL path for a synced file? 
- if (cfg.getValueString("get_file_link") != "") { - // Query OneDrive for the file link - sync.queryOneDriveForFileDetails(cfg.getValueString("get_file_link"), syncDir, "URL"); - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } + // Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file + checkForNoMountScenario(); - // --modified-by - Are we listing the modified-by details of a provided path? - if (cfg.getValueString("modified_by") != "") { - // Query OneDrive for the file link - sync.queryOneDriveForFileDetails(cfg.getValueString("modified_by"), syncDir, "ModifiedBy"); - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // Are we listing OneDrive Business Shared Folders - if (cfg.getValueBool("list_business_shared_folders")) { - // Is this a business account type? - if (sync.getAccountType() == "business"){ - // List OneDrive Business Shared Folders - sync.listOneDriveBusinessSharedFolders(); - } else { - log.error("ERROR: Unsupported account type for listing OneDrive Business Shared Folders"); - } - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // Are we going to sync OneDrive Business Shared Folders - if (cfg.getValueBool("sync_business_shared_folders")) { - // Is this a business account type? 
- if (sync.getAccountType() == "business"){ - // Configure flag to sync business folders - sync.setSyncBusinessFolders(); - } else { - log.error("ERROR: Unsupported account type for syncing OneDrive Business Shared Folders"); - } - } + // Set the default thread pool value - hard coded to 16 + defaultPoolThreads(to!int(appConfig.concurrentThreads)); - // Ensure that the value stored for cfg.getValueString("single_directory") does not contain any extra quotation marks - if (cfg.getValueString("single_directory") != ""){ - string originalSingleDirectoryValue = cfg.getValueString("single_directory"); - // Strip quotation marks from provided path to ensure no issues within a Docker environment when using passed in values - string updatedSingleDirectoryValue = strip(originalSingleDirectoryValue, "\""); - cfg.setValueString("single_directory", updatedSingleDirectoryValue); - } - - // Are we displaying the sync status of the client? - if (cfg.getValueBool("display_sync_status")) { + // Is the sync engine initiallised correctly? + if (appConfig.syncEngineWasInitialised) { + // Configure some initial variables + string singleDirectoryPath; + string localPath = "."; string remotePath = "/"; - // Are we doing a single directory check? - if (cfg.getValueString("single_directory") != ""){ - // Need two different path strings here - remotePath = cfg.getValueString("single_directory"); + + // Are we doing a single directory operation (--single-directory) ? + if (!appConfig.getValueString("single_directory").empty) { + // Set singleDirectoryPath + singleDirectoryPath = appConfig.getValueString("single_directory"); + + // Ensure that this is a normalised relative path to runtimeSyncDirectory + string normalisedRelativePath = replace(buildNormalizedPath(absolutePath(singleDirectoryPath)), buildNormalizedPath(absolutePath(runtimeSyncDirectory)), "." 
); + + // The user provided a directory to sync within the configured 'sync_dir' path + // This also validates if the path being used exists online and/or does not have a 'case-insensitive match' + syncEngineInstance.setSingleDirectoryScope(normalisedRelativePath); + + // Does the directory we want to sync actually exist locally? + if (!exists(singleDirectoryPath)) { + // The requested path to use with --single-directory does not exist locally within the configured 'sync_dir' + log.logAndNotify("WARNING: The requested path for --single-directory does not exist locally. Creating requested path within ", runtimeSyncDirectory); + // Make the required --single-directory path locally + mkdirRecurse(singleDirectoryPath); + // Configure the applicable permissions for the folder + log.vdebug("Setting directory permissions for: ", singleDirectoryPath); + singleDirectoryPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + } + + // Update the paths that we use to perform the sync actions + localPath = singleDirectoryPath; + remotePath = singleDirectoryPath; + + // Display that we are syncing from a specific path due to --single-directory + log.vlog("Syncing changes from this selected path: ", singleDirectoryPath); } - sync.queryDriveForChanges(remotePath); - } - // Are we performing a sync, or monitor operation? - if ((cfg.getValueBool("synchronize")) || (cfg.getValueBool("monitor"))) { - // Initialise the monitor class, so that we can do more granular inotify handling when performing the actual sync - // needed for --synchronize and --monitor handling - Monitor m = new Monitor(selectiveSync); - - if (cfg.getValueBool("synchronize")) { - if (online) { - // set flag for exit scope - synchronizeConfigured = true; - - // Check user entry for local path - the above chdir means we are already in ~/OneDrive/ thus singleDirectory is local to this path - if (cfg.getValueString("single_directory") != "") { - // Does the directory we want to sync actually exist? 
- if (!exists(cfg.getValueString("single_directory"))) { - // The requested path to use with --single-directory does not exist locally within the configured 'sync_dir' - log.logAndNotify("WARNING: The requested path for --single-directory does not exist locally. Creating requested path within ", syncDir); - // Make the required --single-directory path locally - string singleDirectoryPath = cfg.getValueString("single_directory"); - mkdirRecurse(singleDirectoryPath); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", singleDirectoryPath); - singleDirectoryPath.setAttributes(cfg.returnRequiredDirectoryPermisions()); - } + // Are we doing a --sync operation? This includes doing any --single-directory operations + if (appConfig.getValueBool("synchronize")) { + // Did the user specify --upload-only? + if (appConfig.getValueBool("upload_only")) { + // Perform the --upload-only sync process + performUploadOnlySyncProcess(localPath); + } + + // Did the user specify --download-only? + if (appConfig.getValueBool("download_only")) { + // Only download data from OneDrive + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + // Perform the DB consistency check + // This will also delete any out-of-sync flagged items if configured to do so + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + // Do we cleanup local files? 
+ // - Deletes of data from online will already have been performed, but what we are now doing is searching the local filesystem + // for any new data locally, that usually would be uploaded to OneDrive, but instead, because of the options being + // used, will need to be deleted from the local filesystem + if (appConfig.getValueBool("cleanup_local_files")) { + // Perform the filesystem walk + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); } - // perform a --synchronize sync - // fullScanRequired = false, for final true-up - // but if we have sync_list configured, use syncListConfigured which = true - performSync(sync, cfg.getValueString("single_directory"), cfg.getValueBool("download_only"), cfg.getValueBool("local_first"), cfg.getValueBool("upload_only"), LOG_NORMAL, false, syncListConfigured, displaySyncOptions, cfg.getValueBool("monitor"), m, cleanupLocalFilesGlobal); - - // Write WAL and SHM data to file for this sync - log.vdebug("Merge contents of WAL and SHM files into main database file"); - itemDb.performVacuum(); } + + // If no use of --upload-only or --download-only + if ((!appConfig.getValueBool("upload_only")) && (!appConfig.getValueBool("download_only"))) { + // Perform the standard sync process + performStandardSyncProcess(localPath); + } + + // Detail the outcome of the sync process + displaySyncOutcome(); } - - if (cfg.getValueBool("monitor")) { - log.logAndNotify("Initializing monitor ..."); - log.log("OneDrive monitor interval (seconds): ", cfg.getValueLong("monitor_interval")); - - m.onDirCreated = delegate(string path) { + + // Are we doing a --monitor operation? 
+ if (appConfig.getValueBool("monitor")) { + // What are the current values for the platform we are running on + // Max number of open files /proc/sys/fs/file-max + string maxOpenFiles = strip(readText("/proc/sys/fs/file-max")); + // What is the currently configured maximum inotify watches that can be used + // /proc/sys/fs/inotify/max_user_watches + string maxInotifyWatches = strip(readText("/proc/sys/fs/inotify/max_user_watches")); + + // Start the monitor process + log.log("OneDrive synchronisation interval (seconds): ", appConfig.getValueLong("monitor_interval")); + + // If we are in a --download-only method of operation, the output of these is not required + if (!appConfig.getValueBool("download_only")) { + log.vlog("Maximum allowed open files: ", maxOpenFiles); + log.vlog("Maximum allowed inotify user watches: ", maxInotifyWatches); + } + + // Configure the monitor class + Monitor filesystemMonitor = new Monitor(appConfig, selectiveSync); + + // Delegated function for when inotify detects a new local directory has been created + filesystemMonitor.onDirCreated = delegate(string path) { // Handle .folder creation if skip_dotfiles is enabled - if ((cfg.getValueBool("skip_dotfiles")) && (selectiveSync.isDotFile(path))) { + if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(path))) { log.vlog("[M] Skipping watching local path - .folder found & --skip-dot-files enabled: ", path); } else { log.vlog("[M] Local directory created: ", path); try { - sync.scanForDifferences(path); + syncEngineInstance.scanLocalFilesystemPathForNewData(path); } catch (CurlException e) { log.vlog("Offline, cannot create remote dir!"); } catch(Exception e) { @@ -1454,26 +648,30 @@ int main(string[] args) } } }; - m.onFileChanged = delegate(string path) { + + // Delegated function for when inotify detects a local file has been changed + filesystemMonitor.onFileChanged = delegate(string path) { log.vlog("[M] Local file changed: ", path); try { - sync.scanForDifferences(path); + 
syncEngineInstance.handleLocalFileTrigger(path); } catch (CurlException e) { log.vlog("Offline, cannot upload changed item!"); } catch(Exception e) { log.logAndNotify("Cannot upload file changes/creation: ", e.msg); } }; - m.onDelete = delegate(string path) { - log.log("Received inotify delete event from operating system .. attempting item deletion as requested"); + + // Delegated function for when inotify detects a delete event + filesystemMonitor.onDelete = delegate(string path) { log.vlog("[M] Local item deleted: ", path); try { - sync.deleteByPath(path); + log.log("The operating system sent a deletion notification. Trying to delete the item as requested"); + syncEngineInstance.deleteByPath(path); } catch (CurlException e) { log.vlog("Offline, cannot delete item!"); } catch(SyncException e) { if (e.msg == "The item to delete is not in the local database") { - log.vlog("Item cannot be deleted from OneDrive because it was not found in the local database"); + log.vlog("Item cannot be deleted from Microsoft OneDrive because it was not found in the local database"); } else { log.logAndNotify("Cannot delete remote item: ", e.msg); } @@ -1481,15 +679,17 @@ int main(string[] args) log.logAndNotify("Cannot delete remote item: ", e.msg); } }; - m.onMove = delegate(string from, string to) { + + // Delegated function for when inotify detects a move event + filesystemMonitor.onMove = delegate(string from, string to) { log.vlog("[M] Local item moved: ", from, " -> ", to); try { // Handle .folder -> folder if skip_dotfiles is enabled - if ((cfg.getValueBool("skip_dotfiles")) && (selectiveSync.isDotFile(from))) { + if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(from))) { // .folder -> folder handling - has to be handled as a new folder - sync.scanForDifferences(to); + syncEngineInstance.scanLocalFilesystemPathForNewData(to); } else { - sync.uploadMoveItem(from, to); + syncEngineInstance.uploadMoveItem(from, to); } } catch (CurlException e) { log.vlog("Offline, 
cannot move item!"); @@ -1497,236 +697,182 @@ int main(string[] args) log.logAndNotify("Cannot move item: ", e.msg); } }; + + // Handle SIGINT and SIGTERM signal(SIGINT, &exitHandler); signal(SIGTERM, &exitHandler); - - // attempt to initialise monitor class - if (!cfg.getValueBool("download_only")) { + + // Initialise the local filesystem monitor class using inotify to monitor for local filesystem changes + // If we are in a --download-only method of operation, we do not enable local filesystem monitoring + if (!appConfig.getValueBool("download_only")) { + // Not using --download-only try { - m.init(cfg, cfg.getValueLong("verbose") > 0, cfg.getValueBool("skip_symlinks"), cfg.getValueBool("check_nosync")); - } catch (MonitorException e) { - // monitor initialisation failed + log.log("Initialising filesystem inotify monitoring ..."); + filesystemMonitor.initialise(); + log.log("Performing initial syncronisation to ensure consistent local state ..."); + } catch (MonitorException e) { + // monitor class initialisation failed log.error("ERROR: ", e.msg); - oneDrive.shutdown(); return EXIT_FAILURE; } } - - // monitor loop + + // Filesystem monitor loop bool performMonitor = true; ulong monitorLoopFullCount = 0; - immutable auto checkInterval = dur!"seconds"(cfg.getValueLong("monitor_interval")); + ulong fullScanFrequencyLoopCount = 0; + ulong monitorLogOutputLoopCount = 0; + immutable auto checkOnlineInterval = dur!"seconds"(appConfig.getValueLong("monitor_interval")); immutable auto githubCheckInterval = dur!"seconds"(86400); - immutable long logInterval = cfg.getValueLong("monitor_log_frequency"); - immutable long fullScanFrequency = cfg.getValueLong("monitor_fullscan_frequency"); + immutable ulong fullScanFrequency = appConfig.getValueLong("monitor_fullscan_frequency"); + immutable ulong logOutputSupressionInterval = appConfig.getValueLong("monitor_log_frequency"); MonoTime lastCheckTime = MonoTime.currTime(); MonoTime lastGitHubCheckTime = MonoTime.currTime(); + 
string loopStartOutputMessage = "################################################## NEW LOOP ##################################################"; + string loopStopOutputMessage = "################################################ LOOP COMPLETE ###############################################"; - long logMonitorCounter = 0; - long fullScanCounter = 0; - // set fullScanRequired to true so that at application startup we perform a full walk - bool fullScanRequired = true; - bool syncListConfiguredFullScanOverride = false; - // if sync list is configured, set to true - if (syncListConfigured) { - // sync list is configured - syncListConfiguredFullScanOverride = true; - } - immutable bool webhookEnabled = cfg.getValueBool("webhook_enabled"); - while (performMonitor) { - if (!cfg.getValueBool("download_only")) { + + // Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file - the disk may have been ejected .. + checkForNoMountScenario(); + + // If we are in a --download-only method of operation, there is no filesystem monitoring, so no inotify events to check + if (!appConfig.getValueBool("download_only")) { try { - m.update(online); + // Process any inotify events + filesystemMonitor.update(true); } catch (MonitorException e) { // Catch any exceptions thrown by inotify / monitor engine log.error("ERROR: The following inotify error was generated: ", e.msg); } } - + // Check for notifications pushed from Microsoft to the webhook bool notificationReceived = false; - if (webhookEnabled) { - // Create a subscription on the first run, or renew the subscription - // on subsequent runs when it is about to expire. - oneDrive.createOrRenewSubscription(); - - // Process incoming notifications if any. - - // Empirical evidence shows that Microsoft often sends multiple - // notifications for one single change, so we need a loop to exhaust - // all signals that were queued up by the webhook. 
The notifications - // do not contain any actual changes, and we will always rely do the - // delta endpoint to sync to latest. Therefore, only one sync run is - // good enough to catch up for multiple notifications. - for (int signalCount = 0;; signalCount++) { - const auto signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); - if (signalExists) { - notificationReceived = true; + + // Check here for a webhook notification + + // Get the current time this loop is starting + auto currentTime = MonoTime.currTime(); + + // Do we perform a sync with OneDrive? + if (notificationReceived || (currentTime - lastCheckTime > checkOnlineInterval) || (monitorLoopFullCount == 0)) { + // Increment relevant counters + monitorLoopFullCount++; + fullScanFrequencyLoopCount++; + monitorLogOutputLoopCount++; + + // If full scan at a specific frequency enabled? + if (fullScanFrequency > 0) { + // Full Scan set for some 'frequency' - do we flag to perform a full scan of the online data? + if (fullScanFrequencyLoopCount > fullScanFrequency) { + // set full scan trigger for true up + log.vdebug("Enabling Full Scan True Up (fullScanFrequencyLoopCount > fullScanFrequency), resetting fullScanFrequencyLoopCount = 1"); + fullScanFrequencyLoopCount = 1; + appConfig.fullScanTrueUpRequired = true; } else { - if (notificationReceived) { - log.log("Received ", signalCount," refresh signals from the webhook"); - } - break; + // unset full scan trigger for true up + log.vdebug("Disabling Full Scan True Up"); + appConfig.fullScanTrueUpRequired = false; } + } else { + // No it is disabled - ensure this is false + appConfig.fullScanTrueUpRequired = false; } - } - - auto currTime = MonoTime.currTime(); - // has monitor_interval elapsed or are we at application startup / monitor startup? 
- // in a --resync scenario, if we have not 're-populated' the database, valid changes will get skipped: - // Monitor directory: ./target - // Monitor directory: target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // [M] Item moved: random_files/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby -> target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // Moving random_files/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby to target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // Skipping uploading this new file as parent path is not in the database: target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby - // 'target' should be in the DB, it should also exist online, but because of --resync, it does not exist in the database thus parent check fails - if (notificationReceived || (currTime - lastCheckTime > checkInterval) || (monitorLoopFullCount == 0)) { - // Check Application Version against GitHub once per day - if (currTime - lastGitHubCheckTime > githubCheckInterval) { - // --monitor GitHub Application Version Check time expired - checkApplicationVersion(); - // update when we have performed this check - lastGitHubCheckTime = MonoTime.currTime(); - } - // monitor sync loop - logOutputMessage = "################################################## NEW LOOP ##################################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); + log.vdebug(loopStartOutputMessage); + log.vdebug("Total Run-Time Loop Number: ", monitorLoopFullCount); + log.vdebug("Full Scan Freqency Loop Number: ", fullScanFrequencyLoopCount); + SysTime startFunctionProcessingTime = Clock.currTime(); + log.vdebug("Start Monitor Loop Time: ", startFunctionProcessingTime); + + // Do we perform any monitor console logging output surpression? + // 'monitor_log_frequency' controls how often, in a non-verbose application output mode, how often + // the full output of what is occuring is done. 
This is done to lessen the 'verbosity' of non-verbose + // logging, but only when running in --monitor + if (monitorLogOutputLoopCount > logOutputSupressionInterval) { + // unsurpress the logging output + monitorLogOutputLoopCount = 1; + log.vdebug("Unsuppressing log output"); + appConfig.surpressLoggingOutput = false; } else { - log.vdebug(logOutputMessage); - } - // Increment monitorLoopFullCount - monitorLoopFullCount++; - // Display memory details at start of loop - if (displayMemoryUsage) { - log.displayMemoryUsagePreGC(); - } - - // log monitor output suppression - logMonitorCounter += 1; - if (logMonitorCounter > logInterval) { - logMonitorCounter = 1; - } - - // do we perform a full scan of sync_dir and database integrity check? - fullScanCounter += 1; - // fullScanFrequency = 'monitor_fullscan_frequency' from config - if (fullScanCounter > fullScanFrequency){ - // 'monitor_fullscan_frequency' counter has exceeded - fullScanCounter = 1; - // set fullScanRequired = true due to 'monitor_fullscan_frequency' counter has been exceeded - fullScanRequired = true; - // are we using sync_list? 
- if (syncListConfigured) { - // sync list is configured - syncListConfiguredFullScanOverride = true; + // do we surpress the logging output to absolute minimal + if (monitorLoopFullCount == 1) { + // application startup with --monitor + log.vdebug("Unsuppressing initial sync log output"); + appConfig.surpressLoggingOutput = false; + } else { + // only surpress if we are not doing --verbose or higher + if (log.verbose == 0) { + log.vdebug("Suppressing --monitor log output"); + appConfig.surpressLoggingOutput = true; + } else { + log.vdebug("Unsuppressing log output"); + appConfig.surpressLoggingOutput = false; + } } } - - if (displaySyncOptions) { - // sync option handling per sync loop - log.log("fullScanCounter = ", fullScanCounter); - log.log("syncListConfigured = ", syncListConfigured); - log.log("fullScanRequired = ", fullScanRequired); - log.log("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride); - } else { - // sync option handling per sync loop via debug - log.vdebug("fullScanCounter = ", fullScanCounter); - log.vdebug("syncListConfigured = ", syncListConfigured); - log.vdebug("fullScanRequired = ", fullScanRequired); - log.vdebug("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride); - } - - try { - if (!initSyncEngine(sync)) { - // Use exit scopes to shutdown API - return EXIT_FAILURE; + + // How long has the application been running for? + auto elapsedTime = Clock.currTime() - applicationStartTime; + log.vdebug("Application run-time thus far: ", elapsedTime); + + // Need to re-validate that the client is still online for this loop + if (testInternetReachability(appConfig)) { + // Starting a sync + log.log("Starting a sync with Microsoft OneDrive"); + + // Attempt to reset syncFailures + syncEngineInstance.resetSyncFailures(); + + // Did the user specify --upload-only? 
+ if (appConfig.getValueBool("upload_only")) { + // Perform the --upload-only sync process + performUploadOnlySyncProcess(localPath, filesystemMonitor); + } else { + // Perform the standard sync process + performStandardSyncProcess(localPath, filesystemMonitor); } - try { - // performance timing - SysTime startSyncProcessingTime = Clock.currTime(); - - // perform a --monitor sync - if ((cfg.getValueLong("verbose") > 0) || (logMonitorCounter == logInterval) || (fullScanRequired) ) { - // log to console and log file if enabled - if (cfg.getValueBool("display_processing_time")) { - log.log(startMessage, " ", startSyncProcessingTime); - } else { - log.log(startMessage); - } - } else { - // log file only if enabled so we know when a sync started when not using --verbose - log.fileOnly(startMessage); - } - performSync(sync, cfg.getValueString("single_directory"), cfg.getValueBool("download_only"), cfg.getValueBool("local_first"), cfg.getValueBool("upload_only"), (logMonitorCounter == logInterval ? 
MONITOR_LOG_QUIET : MONITOR_LOG_SILENT), fullScanRequired, syncListConfiguredFullScanOverride, displaySyncOptions, cfg.getValueBool("monitor"), m, cleanupLocalFilesGlobal); - if (!cfg.getValueBool("download_only")) { - // discard all events that may have been generated by the sync that have not already been handled - try { - m.update(false); - } catch (MonitorException e) { - // Catch any exceptions thrown by inotify / monitor engine - log.error("ERROR: The following inotify error was generated: ", e.msg); - } - } - SysTime endSyncProcessingTime = Clock.currTime(); - if ((cfg.getValueLong("verbose") > 0) || (logMonitorCounter == logInterval) || (fullScanRequired) ) { - // log to console and log file if enabled - if (cfg.getValueBool("display_processing_time")) { - log.log(finishMessage, " ", endSyncProcessingTime); - log.log("Elapsed Sync Time with OneDrive Service: ", (endSyncProcessingTime - startSyncProcessingTime)); - } else { - log.log(finishMessage); - } - } else { - // log file only if enabled so we know when a sync completed when not using --verbose - log.fileOnly(finishMessage); - } - } catch (CurlException e) { - // we already tried three times in the performSync routine - // if we still have problems, then the sync handle might have - // gone stale and we need to re-initialize the sync engine - log.log("Persistent connection errors, reinitializing connection"); - sync.reset(); + + // Discard any inotify events generated as part of any sync operation + filesystemMonitor.update(false); + + // Detail the outcome of the sync process + displaySyncOutcome(); + + if (appConfig.fullScanTrueUpRequired) { + // Write WAL and SHM data to file for this loop + log.vdebug("Merge contents of WAL and SHM files into main database file"); + itemDB.performVacuum(); } - } catch (CurlException e) { - log.log("Cannot initialize connection to OneDrive"); + } else { + // Not online + log.log("Microsoft OneDrive service is not reachable at this time. 
Will re-try on next loop attempt."); } - // performSync complete, set lastCheckTime to current time - lastCheckTime = MonoTime.currTime(); + + // Output end of loop processing times + SysTime endFunctionProcessingTime = Clock.currTime(); + log.vdebug("End Monitor Loop Time: ", endFunctionProcessingTime); + log.vdebug("Elapsed Monitor Loop Processing Time: ", (endFunctionProcessingTime - startFunctionProcessingTime)); // Display memory details before cleanup if (displayMemoryUsage) log.displayMemoryUsagePreGC(); // Perform Garbage Cleanup GC.collect(); + // Return free memory to the OS + GC.minimize(); // Display memory details after cleanup if (displayMemoryUsage) log.displayMemoryUsagePostGC(); - // If we did a full scan, make sure we merge the conents of the WAL and SHM to disk - if (fullScanRequired) { - // Write WAL and SHM data to file for this loop - log.vdebug("Merge contents of WAL and SHM files into main database file"); - itemDb.performVacuum(); - } - - // reset fullScanRequired and syncListConfiguredFullScanOverride - fullScanRequired = false; - if (syncListConfigured) syncListConfiguredFullScanOverride = false; + // Log that this loop is complete + log.vdebug(loopStopOutputMessage); + // performSync complete, set lastCheckTime to current time + lastCheckTime = MonoTime.currTime(); - // monitor loop complete - logOutputMessage = "################################################ LOOP COMPLETE ###############################################"; - - // Handle display options - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } // Developer break via config option - if (cfg.getValueLong("monitor_max_loop") > 0) { + if (appConfig.getValueLong("monitor_max_loop") > 0) { // developer set option to limit --monitor loops - if (monitorLoopFullCount == (cfg.getValueLong("monitor_max_loop"))) { + if (monitorLoopFullCount == (appConfig.getValueLong("monitor_max_loop"))) { performMonitor = false; log.log("Exiting after 
", monitorLoopFullCount, " loops due to developer set option"); } @@ -1736,359 +882,225 @@ int main(string[] args) Thread.sleep(dur!"seconds"(1)); } } + } else { + // Exit application as the sync engine could not be initialised + log.error("Application Sync Engine could not be initialised correctly"); + // Use exit scope + return EXIT_FAILURE; + } + + // Exit application using exit scope + if (!syncEngineInstance.syncFailures) { + return EXIT_SUCCESS; + } else { + return EXIT_FAILURE; } - - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; } -void cleanupDryRunDatabase(string databaseFilePathDryRun) -{ - // cleanup dry-run data - log.vdebug("Running cleanupDryRunDatabase"); - string dryRunShmFile = databaseFilePathDryRun ~ "-shm"; - string dryRunWalFile = databaseFilePathDryRun ~ "-wal"; - if (exists(databaseFilePathDryRun)) { - // remove the file - log.vdebug("Removing items-dryrun.sqlite3 as dry run operations complete"); - // remove items-dryrun.sqlite3 - safeRemove(databaseFilePathDryRun); - } - // silent cleanup of shm and wal files if they exist - if (exists(dryRunShmFile)) { - // remove items-dryrun.sqlite3-shm - safeRemove(dryRunShmFile); +void performUploadOnlySyncProcess(string localPath, Monitor filesystemMonitor = null) { + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); } - if (exists(dryRunWalFile)) { - // remove items-dryrun.sqlite3-wal - safeRemove(dryRunWalFile); + + // Scan the configured 'sync_dir' for new data to upload + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); } } 
-bool initSyncEngine(SyncEngine sync) -{ - try { - sync.init(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { - // Authorization is invalid - log.log("\nAuthorization token invalid, use --reauth to authorize the client again\n"); - return false; +void performStandardSyncProcess(string localPath, Monitor filesystemMonitor = null) { + + // If we are performing log supression, output this message so the user knows what is happening + if (appConfig.surpressLoggingOutput) { + log.log("Syncing changes from Microsoft OneDrive ..."); + } + + // Zero out these arrays + syncEngineInstance.fileDownloadFailures = []; + syncEngineInstance.fileUploadFailures = []; + + // Which way do we sync first? + // OneDrive first then local changes (normal operational process that uses OneDrive as the source of truth) + // Local First then OneDrive changes (alternate operation process to use local files as source of truth) + if (appConfig.getValueBool("local_first")) { + // Local data first + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Scan the configured 'sync_dir' for new data to upload to OneDrive + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error, message already printed - return false; + + // Download data from OneDrive last + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + 
filesystemMonitor.update(false); + } + } else { + // Normal sync + // Download data from OneDrive first + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); + } + + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Scan the configured 'sync_dir' for new data to upload to OneDrive + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); + } + + // Make sure we sync any DB data to this point, but only if not in --monitor mode + // In --monitor mode, this is handled within the 'loop', based on when the full scan true up is being performed + if (!appConfig.getValueBool("monitor")) { + itemDB.performVacuum(); + } + + // Perform the final true up scan to ensure we have correctly replicated the current online state locally + if (!appConfig.surpressLoggingOutput) { + log.log("Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process"); + } + // We pass in the 'appConfig.fullScanTrueUpRequired' value which then flags do we use the configured 'deltaLink' + // If 'appConfig.fullScanTrueUpRequired' is true, we do not use the 'deltaLink' if we are in --monitor mode, thus forcing a full scan true up + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); } } - return true; } -// try to synchronize the 
folder three times -void performSync(SyncEngine sync, string singleDirectory, bool downloadOnly, bool localFirst, bool uploadOnly, long logLevel, bool fullScanRequired, bool syncListConfiguredFullScanOverride, bool displaySyncOptions, bool monitorEnabled, Monitor m, bool cleanupLocalFiles) -{ - int count; - string remotePath = "/"; - string localPath = "."; - string logOutputMessage; +void displaySyncOutcome() { - // performSync API scan triggers - log.vdebug("performSync API scan triggers"); - log.vdebug("-----------------------------"); - log.vdebug("fullScanRequired = ", fullScanRequired); - log.vdebug("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride); - log.vdebug("-----------------------------"); - - // Are we doing a single directory sync? - if (singleDirectory != ""){ - // Need two different path strings here - remotePath = singleDirectory; - localPath = singleDirectory; - // Set flag for singleDirectoryScope for change handling - sync.setSingleDirectoryScope(); + // Detail any download or upload transfer failures + syncEngineInstance.displaySyncFailures(); + + // Sync is either complete or partially complete + if (!syncEngineInstance.syncFailures) { + // No download or upload issues + if (!appConfig.getValueBool("monitor")) writeln(); // Add an additional line break so that this is clear when using --sync + log.log("Sync with Microsoft OneDrive is complete"); + } else { + log.log("\nSync with Microsoft OneDrive has completed, however there are items that failed to sync."); + // Due to how the OneDrive API works 'changes' such as add new files online, rename files online, delete files online are only sent once when using the /delta API call. + // That we failed to download it, we need to track that, and then issue a --resync to download any of these failed files .. 
unfortunate, but there is no easy way here + if (!syncEngineInstance.fileDownloadFailures.empty) { + log.log("To fix any download failures you may need to perform a --resync to ensure this system is correctly synced with your Microsoft OneDrive Account"); + } + if (!syncEngineInstance.fileUploadFailures.empty) { + log.log("To fix any upload failures you may need to perform a --resync to ensure this system is correctly synced with your Microsoft OneDrive Account"); + } + // So that from a logging perspective these messages are clear, add a line break in + writeln(); } +} - // Due to Microsoft Sharepoint 'enrichment' of files, we try to download the Microsoft modified file automatically - // Set flag if we are in upload only state to handle this differently - // See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details - if (uploadOnly) sync.setUploadOnly(); - - do { - try { - // starting a sync - logOutputMessage = "################################################## NEW SYNC ##################################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } - if (singleDirectory != ""){ - // we were requested to sync a single directory - log.vlog("Syncing changes from this selected path: ", singleDirectory); - if (uploadOnly){ - // Upload Only of selected single directory - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path only - NOT syncing data changes from OneDrive ..."); - sync.scanForDifferences(localPath); - } else { - // No upload only - if (localFirst) { - // Local First - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path first before downloading changes from OneDrive ..."); - sync.scanForDifferences(localPath); - sync.applyDifferencesSingleDirectory(remotePath); - } else { - // OneDrive First - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected OneDrive path ..."); - 
sync.applyDifferencesSingleDirectory(remotePath); - - // Is this a --download-only --cleanup-local-files request? - // If yes, scan for local changes - but --cleanup-local-files is being used, a further flag will trigger local file deletes rather than attempt to upload files to OneDrive - if (cleanupLocalFiles) { - // --download-only and --cleanup-local-files were passed in - log.log("Searching local filesystem for extra files and folders which need to be removed"); - sync.scanForDifferencesFilesystemScan(localPath); - } else { - // is this a --download-only request? - if (!downloadOnly) { - // process local changes - sync.scanForDifferences(localPath); - // ensure that the current remote state is updated locally - sync.applyDifferencesSingleDirectory(remotePath); - } - } - } - } - } else { - // no single directory sync - if (uploadOnly){ - // Upload Only of entire sync_dir - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path only - NOT syncing data changes from OneDrive ..."); - sync.scanForDifferences(localPath); - } else { - // No upload only - string syncCallLogOutput; - if (localFirst) { - // sync local files first before downloading from OneDrive - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path first before downloading changes from OneDrive ..."); - sync.scanForDifferences(localPath); - // if syncListConfiguredFullScanOverride = true - if (syncListConfiguredFullScanOverride) { - // perform a full walk of OneDrive objects - sync.applyDifferences(syncListConfiguredFullScanOverride); - } else { - // perform a walk based on if a full scan is required - sync.applyDifferences(fullScanRequired); - } - } else { - // sync from OneDrive first before uploading files to OneDrive - if ((logLevel < MONITOR_LOG_SILENT) || (fullScanRequired)) log.log("Syncing changes and items from OneDrive ..."); - - // For the initial sync, always use the delta link so that we capture all the right delta changes including adds, moves & 
deletes - logOutputMessage = "Initial Scan: Call OneDrive Delta API for delta changes as compared to last successful sync."; - syncCallLogOutput = "Calling sync.applyDifferences(false);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(false); - - // Is this a --download-only --cleanup-local-files request? - // If yes, scan for local changes - but --cleanup-local-files is being used, a further flag will trigger local file deletes rather than attempt to upload files to OneDrive - if (cleanupLocalFiles) { - // --download-only and --cleanup-local-files were passed in - log.log("Searching local filesystem for extra files and folders which need to be removed"); - sync.scanForDifferencesFilesystemScan(localPath); - } else { - // is this a --download-only request? - if (!downloadOnly) { - // process local changes walking the entire path checking for changes - // in monitor mode all local changes are captured via inotify - // thus scanning every 'monitor_interval' (default 300 seconds) for local changes is excessive and not required - logOutputMessage = "Process local filesystem (sync_dir) for file changes as compared to database entries"; - syncCallLogOutput = "Calling sync.scanForDifferences(localPath);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - - SysTime startIntegrityCheckProcessingTime = Clock.currTime(); - if (sync.getPerformanceProcessingOutput()) { - // performance timing for DB and file system integrity check - start - writeln("============================================================"); - writeln("Start Integrity Check Processing Time: ", startIntegrityCheckProcessingTime); - } - - // What sort of local scan do we want to do? 
- // In --monitor mode, when performing the DB scan, a race condition occurs where by if a file or folder is moved during this process - // the inotify event is discarded once performSync() is finished (see m.update(false) above), so these events need to be handled - // This can be remediated by breaking the DB and file system scan into separate processes, and handing any applicable inotify events in between - if (!monitorEnabled) { - // --synchronize in use - log.log("Performing a database consistency and integrity check on locally stored data ... "); - // standard process flow - sync.scanForDifferences(localPath); - } else { - // --monitor in use - // Use individual calls with inotify checks between to avoid a race condition between these 2 functions - // Database scan integrity check to compare DB data vs actual content on disk to ensure what we think is local, is local - // and that the data 'hash' as recorded in the DB equals the hash of the actual content - // This process can be extremely expensive time and CPU processing wise - // - // fullScanRequired is set to TRUE when the application starts up, or the config option 'monitor_fullscan_frequency' count is reached - // By default, 'monitor_fullscan_frequency' = 12, and 'monitor_interval' = 300, meaning that by default, a full database consistency check - // is done once an hour. - // - // To change this behaviour adjust 'monitor_interval' and 'monitor_fullscan_frequency' to desired values in the application config file - if (fullScanRequired) { - log.log("Performing a database consistency and integrity check on locally stored data due to fullscan requirement ... "); - sync.scanForDifferencesDatabaseScan(localPath); - // handle any inotify events that occured 'whilst' we were scanning the database - m.update(true); - } else { - log.vdebug("NOT performing Database Integrity Check .. 
fullScanRequired = FALSE"); - m.update(true); - } - - // Filesystem walk to find new files not uploaded - log.vdebug("Searching local filesystem for new data"); - sync.scanForDifferencesFilesystemScan(localPath); - // handle any inotify events that occured 'whilst' we were scanning the local filesystem - m.update(true); - } - - SysTime endIntegrityCheckProcessingTime = Clock.currTime(); - if (sync.getPerformanceProcessingOutput()) { - // performance timing for DB and file system integrity check - finish - writeln("End Integrity Check Processing Time: ", endIntegrityCheckProcessingTime); - writeln("Elapsed Function Processing Time: ", (endIntegrityCheckProcessingTime - startIntegrityCheckProcessingTime)); - writeln("============================================================"); - } - - // At this point, all OneDrive changes / local changes should be uploaded and in sync - // This MAY not be the case when using sync_list, thus a full walk of OneDrive ojects is required - - // --synchronize & no sync_list : fullScanRequired = false, syncListConfiguredFullScanOverride = false - // --synchronize & sync_list in use : fullScanRequired = false, syncListConfiguredFullScanOverride = true - - // --monitor loops around 12 iterations. 
On the 1st loop, sets fullScanRequired = true, syncListConfiguredFullScanOverride = true if requried - - // --monitor & no sync_list (loop #1) : fullScanRequired = true, syncListConfiguredFullScanOverride = false - // --monitor & no sync_list (loop #2 - #12) : fullScanRequired = false, syncListConfiguredFullScanOverride = false - // --monitor & sync_list in use (loop #1) : fullScanRequired = true, syncListConfiguredFullScanOverride = true - // --monitor & sync_list in use (loop #2 - #12) : fullScanRequired = false, syncListConfiguredFullScanOverride = false - - // Do not perform a full walk of the OneDrive objects - if ((!fullScanRequired) && (!syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Do not perform a full walk of the OneDrive objects - not required"; - syncCallLogOutput = "Calling sync.applyDifferences(false);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(false); - } - - // Perform a full walk of OneDrive objects because sync_list is in use / or trigger was set in --monitor loop - if ((!fullScanRequired) && (syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because sync_list is in use / or trigger was set in --monitor loop"; - syncCallLogOutput = "Calling sync.applyDifferences(true);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(true); - } - - // Perform a full walk of OneDrive objects because a full scan was required - if ((fullScanRequired) && (!syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because a full scan was required"; - syncCallLogOutput = "Calling sync.applyDifferences(true);"; - if 
(displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(true); - } +void processResyncDatabaseRemoval(string databaseFilePathToRemove) { + log.vdebug("Testing if we have exclusive access to local database file"); + // Are we the only running instance? Test that we can open the database file path + itemDB = new ItemDatabase(databaseFilePathToRemove); + + // did we successfully initialise the database class? + if (!itemDB.isDatabaseInitialised()) { + // no .. destroy class + itemDB = null; + // exit application - void function, force exit this way + exit(-1); + } + + // If we have exclusive access we will not have exited + // destroy access test + destroy(itemDB); + // delete application sync state + log.log("Deleting the saved application sync status ..."); + if (!appConfig.getValueBool("dry_run")) { + safeRemove(databaseFilePathToRemove); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... 
+ log.log("DRY RUN: Not removing the saved application sync status"); + } +} - // Perform a full walk of OneDrive objects because a full scan was required and sync_list is in use and trigger was set in --monitor loop - if ((fullScanRequired) && (syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because a full scan was required and sync_list is in use and trigger was set in --monitor loop"; - syncCallLogOutput = "Calling sync.applyDifferences(true);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(true); - } - } - } - } - } - } +void cleanupDryRunDatabaseFiles(string dryRunDatabaseFile) { + // Temp variables + string dryRunShmFile = dryRunDatabaseFile ~ "-shm"; + string dryRunWalFile = dryRunDatabaseFile ~ "-wal"; - // sync is complete - logOutputMessage = "################################################ SYNC COMPLETE ###############################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } + // If the dry run database exists, clean this up + if (exists(dryRunDatabaseFile)) { + // remove the existing file + log.vdebug("DRY-RUN: Removing items-dryrun.sqlite3 as it still exists for some reason"); + safeRemove(dryRunDatabaseFile); + } + + // silent cleanup of shm files if it exists + if (exists(dryRunShmFile)) { + // remove items-dryrun.sqlite3-shm + log.vdebug("DRY-RUN: Removing items-dryrun.sqlite3-shm as it still exists for some reason"); + safeRemove(dryRunShmFile); + } + + // silent cleanup of wal files if it exists + if (exists(dryRunWalFile)) { + // remove items-dryrun.sqlite3-wal + log.vdebug("DRY-RUN: Removing items-dryrun.sqlite3-wal as it still exists for some reason"); + safeRemove(dryRunWalFile); + } +} - count = -1; - } catch (Exception e) { - if (++count == 3) { - 
log.log("Giving up on sync after three attempts: ", e.msg); - throw e; - } else - log.log("Retry sync count: ", count, ": ", e.msg); +void checkForNoMountScenario() { + // If this is a 'mounted' folder, the 'mount point' should have this file to help the application stop any action to preserve data because the drive to mount is not currently mounted + if (appConfig.getValueBool("check_nomount")) { + // we were asked to check the mount point for the presence of a '.nosync' file + if (exists(".nosync")) { + log.logAndNotify("ERROR: .nosync file found in directory mount point. Aborting application startup process to safeguard data."); + exit(EXIT_FAILURE); } - } while (count != -1); + } } -// getting around the @nogc problem +// Getting around the @nogc problem // https://p0nce.github.io/d-idioms/#Bypassing-@nogc -auto assumeNoGC(T) (T t) if (isFunctionPointer!T || isDelegate!T) -{ +auto assumeNoGC(T) (T t) if (isFunctionPointer!T || isDelegate!T) { enum attrs = functionAttributes!T | FunctionAttribute.nogc; return cast(SetFunctionAttributes!(T, functionLinkage!T, attrs)) t; } +// Catch CTRL-C extern(C) nothrow @nogc @system void exitHandler(int value) { try { assumeNoGC ( () { log.log("Got termination signal, performing clean up"); - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - log.log("Shutting down the HTTP instance"); - oneDrive.shutdown(); - } // was itemDb initialised? 
- if (itemDb.isDatabaseInitialised()) { + if (itemDB.isDatabaseInitialised()) { // Make sure the .wal file is incorporated into the main db before we exit - log.log("Shutting down db connection and merging temporary data"); - itemDb.performVacuum(); - destroy(itemDb); + log.log("Shutting down DB connection and merging temporary data"); + itemDB.performVacuum(); + destroy(itemDB); } })(); } catch(Exception e) {} exit(0); -} - +} \ No newline at end of file diff --git a/src/monitor.d b/src/monitor.d index 06aac0d7a..adad5be54 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -1,27 +1,49 @@ -import core.sys.linux.sys.inotify; +// What is this module called? +module monitor; + +// What does this module require to function? import core.stdc.errno; -import core.sys.posix.poll, core.sys.posix.unistd; -import std.exception, std.file, std.path, std.regex, std.stdio, std.string, std.algorithm; import core.stdc.stdlib; +import core.sys.linux.sys.inotify; +import core.sys.posix.poll; +import core.sys.posix.unistd; +import std.algorithm; +import std.exception; +import std.file; +import std.path; +import std.regex; +import std.stdio; +import std.string; +import std.conv; + +// What other modules that we have created do we need to import? 
import config; -import selective; import util; -static import log; +import log; +import clientSideFiltering; -// relevant inotify events +// Relevant inotify events private immutable uint32_t mask = IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_MOVE | IN_IGNORED | IN_Q_OVERFLOW; -class MonitorException: ErrnoException -{ - @safe this(string msg, string file = __FILE__, size_t line = __LINE__) - { +class MonitorException: ErrnoException { + @safe this(string msg, string file = __FILE__, size_t line = __LINE__) { super(msg, file, line); } } -final class Monitor -{ - bool verbose; +final class Monitor { + // Class variables + ApplicationConfig appConfig; + ClientSideFiltering selectiveSync; + + // Are we verbose in logging output + bool verbose = false; + // skip symbolic links + bool skip_symlinks = false; + // check for .nosync if enabled + bool check_nosync = false; + + // Configure Private Class Variables // inotify file descriptor private int fd; // map every inotify watch descriptor to its directory @@ -30,29 +52,27 @@ final class Monitor private string[int] cookieToPath; // buffer to receive the inotify events private void[] buffer; - // skip symbolic links - bool skip_symlinks; - // check for .nosync if enabled - bool check_nosync; - private SelectiveSync selectiveSync; - + // Configure function delegates void delegate(string path) onDirCreated; void delegate(string path) onFileChanged; void delegate(string path) onDelete; void delegate(string from, string to) onMove; - - this(SelectiveSync selectiveSync) - { - assert(selectiveSync); + + // Configure the class varaible to consume the application configuration including selective sync + this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) { + this.appConfig = appConfig; this.selectiveSync = selectiveSync; } - - void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync) - { - this.verbose = verbose; - this.skip_symlinks = skip_symlinks; - this.check_nosync = check_nosync; + + // 
Initialise the monitor class + void initialise() { + // Configure the variables + skip_symlinks = appConfig.getValueBool("skip_symlinks"); + check_nosync = appConfig.getValueBool("check_nosync"); + if (appConfig.getValueLong("verbose") > 0) { + verbose = true; + } assert(onDirCreated && onFileChanged && onDelete && onMove); fd = inotify_init(); @@ -61,9 +81,9 @@ final class Monitor // from which point do we start watching for changes? string monitorPath; - if (cfg.getValueString("single_directory") != ""){ - // single directory in use, monitor only this - monitorPath = "./" ~ cfg.getValueString("single_directory"); + if (appConfig.getValueString("single_directory") != ""){ + // single directory in use, monitor only this path + monitorPath = "./" ~ appConfig.getValueString("single_directory"); } else { // default monitorPath = "."; @@ -71,14 +91,14 @@ final class Monitor addRecursive(monitorPath); } - void shutdown() - { + // Shutdown the monitor class + void shutdown() { if (fd > 0) close(fd); wdToDirName = null; } - private void addRecursive(string dirname) - { + // Recursivly add this path to be monitored + private void addRecursive(string dirname) { // skip non existing/disappeared items if (!exists(dirname)) { log.vlog("Not adding non-existing/disappeared directory: ", dirname); @@ -173,19 +193,21 @@ final class Monitor } } - private void add(string pathname) - { + // Add this path to be monitored + private void add(string pathname) { int wd = inotify_add_watch(fd, toStringz(pathname), mask); if (wd < 0) { if (errno() == ENOSPC) { + // Get the current value + ulong maxInotifyWatches = to!int(strip(readText("/proc/sys/fs/inotify/max_user_watches"))); log.log("The user limit on the total number of inotify watches has been reached."); - log.log("To see the current max number of watches run:"); - log.log("sysctl fs.inotify.max_user_watches"); - log.log("To change the current max number of watches to 524288 run:"); - log.log("sudo sysctl 
fs.inotify.max_user_watches=524288"); + log.log("Your current limit of inotify watches is: ", maxInotifyWatches); + log.log("It is recommended that you change the max number of inotify watches to at least double your existing value."); + log.log("To change the current max number of watches to " , (maxInotifyWatches * 2) , " run:"); + log.log("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=", (maxInotifyWatches * 2)); } if (errno() == 13) { - if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) { + if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) { // no misleading output that we could not add a watch due to permission denied return; } else { @@ -206,18 +228,17 @@ final class Monitor if (isDir(pathname)) { // This is a directory // is the path exluded if skip_dotfiles configured and path is a .folder? - if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) { + if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) { // no misleading output that we are monitoring this directory return; } // Log that this is directory is being monitored - log.vlog("Monitor directory: ", pathname); + log.vlog("Monitoring directory: ", pathname); } } - // remove a watch descriptor - private void remove(int wd) - { + // Remove a watch descriptor + private void remove(int wd) { assert(wd in wdToDirName); int ret = inotify_rm_watch(fd, wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); @@ -225,9 +246,8 @@ final class Monitor wdToDirName.remove(wd); } - // remove the watch descriptors associated to the given path - private void remove(const(char)[] path) - { + // Remove the watch descriptors associated to the given path + private void remove(const(char)[] path) { path ~= "/"; foreach (wd, dirname; wdToDirName) { if (dirname.startsWith(path)) { @@ -239,17 +259,17 @@ final class Monitor } } - // return the file path from an inotify event - private string getPath(const(inotify_event)* event) - { + 
// Return the file path from an inotify event + private string getPath(const(inotify_event)* event) { string path = wdToDirName[event.wd]; if (event.len > 0) path ~= fromStringz(event.name.ptr); log.vdebug("inotify path event for: ", path); return path; } - void update(bool useCallbacks = true) - { + // Update + void update(bool useCallbacks = true) { + pollfd fds = { fd: fd, events: POLLIN @@ -386,6 +406,8 @@ final class Monitor remove(path); cookieToPath.remove(cookie); } + // Debug Log that all inotify events are flushed + log.vdebug("inotify events flushed"); } } } diff --git a/src/onedrive.d b/src/onedrive.d index 29d33a46e..70e513f97 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -1,104 +1,45 @@ -import std.net.curl; -import etc.c.curl: CurlOption; -import std.datetime, std.datetime.systime, std.exception, std.file, std.json, std.path; -import std.stdio, std.string, std.uni, std.uri, std.file, std.uuid; -import std.array: split; -import core.atomic : atomicOp; -import core.stdc.stdlib; -import core.thread, std.conv, std.math; +// What is this module called? +module onedrive; + +// What does this module require to function? 
+import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; +import core.memory; +import core.thread; +import std.stdio; +import std.string; +import std.utf; +import std.file; +import std.exception; +import std.regex; +import std.json; import std.algorithm.searching; -import std.concurrency; -import progress; -import config; -import util; -import arsd.cgi; +import std.net.curl; import std.datetime; -static import log; -shared bool debugResponse = false; -private bool dryRun = false; -private bool simulateNoRefreshTokenFile = false; -private ulong retryAfterValue = 0; - -private immutable { - // Client ID / Application ID (abraunegg) - string clientIdDefault = "d50ca740-c83f-4d1b-b616-12c519384f0c"; - - // Azure Active Directory & Graph Explorer Endpoints - // Global & Defaults - string globalAuthEndpoint = "https://login.microsoftonline.com"; - string globalGraphEndpoint = "https://graph.microsoft.com"; - - // US Government L4 - string usl4AuthEndpoint = "https://login.microsoftonline.us"; - string usl4GraphEndpoint = "https://graph.microsoft.us"; - - // US Government L5 - string usl5AuthEndpoint = "https://login.microsoftonline.us"; - string usl5GraphEndpoint = "https://dod-graph.microsoft.us"; - - // Germany - string deAuthEndpoint = "https://login.microsoftonline.de"; - string deGraphEndpoint = "https://graph.microsoft.de"; - - // China - string cnAuthEndpoint = "https://login.chinacloudapi.cn"; - string cnGraphEndpoint = "https://microsoftgraph.chinacloudapi.cn"; -} - -private { - // Client ID / Application ID - string clientId = clientIdDefault; - - // Default User Agent configuration - string isvTag = "ISV"; - string companyName = "abraunegg"; - // Application name as per Microsoft Azure application registration - string appTitle = "OneDrive Client for Linux"; - - // Default Drive ID - string driveId = ""; - - // API Query URL's, based on using defaults, but can be updated by config option 'azure_ad_endpoint' - // Authentication - string authUrl = 
globalAuthEndpoint ~ "/common/oauth2/v2.0/authorize"; - string redirectUrl = globalAuthEndpoint ~ "/common/oauth2/nativeclient"; - string tokenUrl = globalAuthEndpoint ~ "/common/oauth2/v2.0/token"; - - // Drive Queries - string driveUrl = globalGraphEndpoint ~ "/v1.0/me/drive"; - string driveByIdUrl = globalGraphEndpoint ~ "/v1.0/drives/"; - - // What is 'shared with me' Query - string sharedWithMeUrl = globalGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; +import std.path; +import std.conv; +import std.math; +import std.uri; - // Item Queries - string itemByIdUrl = globalGraphEndpoint ~ "/v1.0/me/drive/items/"; - string itemByPathUrl = globalGraphEndpoint ~ "/v1.0/me/drive/root:/"; - - // Office 365 / SharePoint Queries - string siteSearchUrl = globalGraphEndpoint ~ "/v1.0/sites?search"; - string siteDriveUrl = globalGraphEndpoint ~ "/v1.0/sites/"; - - // Subscriptions - string subscriptionUrl = globalGraphEndpoint ~ "/v1.0/subscriptions"; -} +// What other modules that we have created do we need to import? 
+import config; +import log; +import util; +import curlEngine; +import progress; -class OneDriveException: Exception -{ +class OneDriveException: Exception { // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/errors int httpStatusCode; JSONValue error; - @safe pure this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__) - { + @safe pure this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__) { this.httpStatusCode = httpStatusCode; this.error = error; string msg = format("HTTP request returned status code %d (%s)", httpStatusCode, reason); super(msg, file, line); } - this(int httpStatusCode, string reason, ref const JSONValue error, string file = __FILE__, size_t line = __LINE__) - { + this(int httpStatusCode, string reason, ref const JSONValue error, string file = __FILE__, size_t line = __LINE__) { this.httpStatusCode = httpStatusCode; this.error = error; string msg = format("HTTP request returned status code %d (%s)\n%s", httpStatusCode, reason, toJSON(error, true)); @@ -106,481 +47,332 @@ class OneDriveException: Exception } } -class OneDriveWebhook { - // We need OneDriveWebhook.serve to be a static function, otherwise we would hit the member function - // "requires a dual-context, which is deprecated" warning. The root cause is described here: - // - https://issues.dlang.org/show_bug.cgi?id=5710 - // - https://forum.dlang.org/post/fkyppfxzegenniyzztos@forum.dlang.org - // The problem is deemed a bug and should be fixed in the compilers eventually. The singleton stuff - // could be undone when it is fixed. 
- // - // Following the singleton pattern described here: https://wiki.dlang.org/Low-Lock_Singleton_Pattern - // Cache instantiation flag in thread-local bool - // Thread local - private static bool instantiated_; - - // Thread global - private __gshared OneDriveWebhook instance_; - - private string host; - private ushort port; - private Tid parentTid; - private shared uint count; - - static OneDriveWebhook getOrCreate(string host, ushort port, Tid parentTid) { - if (!instantiated_) { - synchronized(OneDriveWebhook.classinfo) { - if (!instance_) { - instance_ = new OneDriveWebhook(host, port, parentTid); - } - - instantiated_ = true; - } - } +class OneDriveApi { + // Class variables + ApplicationConfig appConfig; + CurlEngine curlEngine; + string clientId = ""; + string companyName = ""; + string authUrl = ""; + string redirectUrl = ""; + string tokenUrl = ""; + string driveUrl = ""; + string driveByIdUrl = ""; + string sharedWithMeUrl = ""; + string itemByIdUrl = ""; + string itemByPathUrl = ""; + string siteSearchUrl = ""; + string siteDriveUrl = ""; + string subscriptionUrl = ""; + string tenantId = ""; + string authScope = ""; + string refreshToken = ""; + bool dryRun = false; + bool debugResponse = false; + ulong retryAfterValue = 0; + + this(ApplicationConfig appConfig) { + // Configure the class varaible to consume the application configuration + this.appConfig = appConfig; + // Configure the major API Query URL's, based on using application configuration + // These however can be updated by config option 'azure_ad_endpoint', thus handled differently + + // Drive Queries + driveUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.globalGraphEndpoint ~ "/v1.0/drives/"; - return instance_; - } + // What is 'shared with me' Query + sharedWithMeUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; - private this(string host, ushort port, Tid parentTid) { - this.host = host; - this.port = port; - this.parentTid = 
parentTid; - this.count = 0; - } + // Item Queries + itemByIdUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/root:/"; - // The static serve() is necessary because spawn() does not like instance methods - static serve() { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.serveImpl(); - } + // Office 365 / SharePoint Queries + siteSearchUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites/"; - // The static handle() is necessary to work around the dual-context warning mentioned above - private static void handle(Cgi cgi) { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.handleImpl(cgi); + // Subscriptions + subscriptionUrl = appConfig.globalGraphEndpoint ~ "/v1.0/subscriptions"; } + + // Initialise the OneDrive API class + bool initialise() { + // Initialise the curl engine + curlEngine = new CurlEngine(); + curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version")); - private void serveImpl() { - auto server = new RequestServer(host, port); - server.serveEmbeddedHttp!handle(); - } + // Authorised value to return + bool authorised = false; - private void handleImpl(Cgi cgi) { - if (.debugResponse) { - log.log("Webhook request: ", cgi.requestMethod, " ", cgi.requestUri); - if (!cgi.postBody.empty) { - log.log("Webhook post body: ", 
cgi.postBody); - } + // Did the user specify --dry-run + dryRun = appConfig.getValueBool("dry_run"); + + // Did the user specify --debug-https + debugResponse = appConfig.getValueBool("debug_https"); + + // Set clientId to use the configured 'application_id' + clientId = appConfig.getValueString("application_id"); + if (clientId != appConfig.defaultApplicationId) { + // a custom 'application_id' was set + companyName = "custom_application"; } - - cgi.setResponseContentType("text/plain"); - - if ("validationToken" in cgi.get) { - // For validation requests, respond with the validation token passed in the query string - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request - cgi.write(cgi.get["validationToken"]); - log.log("Webhook: handled validation request"); + + // Do we have a custom Azure Tenant ID? + if (!appConfig.getValueString("azure_tenant_id").empty) { + // Use the value entered by the user + tenantId = appConfig.getValueString("azure_tenant_id"); } else { - // Notifications don't include any information about the changes that triggered them. - // Put a refresh signal in the queue and let the main monitor loop process it. 
- // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks - count.atomicOp!"+="(1); - send(parentTid, to!ulong(count)); - cgi.write("OK"); - log.log("Webhook: sent refresh signal #", count); + // set to common + tenantId = "common"; } - } -} - -final class OneDriveApi -{ - private Config cfg; - private string refreshToken, accessToken, subscriptionId; - private SysTime accessTokenExpiration; - private HTTP http; - private OneDriveWebhook webhook; - private SysTime subscriptionExpiration; - private Duration subscriptionExpirationInterval, subscriptionRenewalInterval; - private string notificationUrl; - - // if true, every new access token is printed - bool printAccessToken; - - this(Config cfg) - { - this.cfg = cfg; - http = HTTP(); - // Curl Timeout Handling - // libcurl dns_cache_timeout timeout - http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout"))); - // Timeout for HTTPS connections - http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout"))); - // with the following settings we force - // - if there is no data flow for 10min, abort - // - if the download time for one item exceeds 1h, abort - // - // timeout for activity on connection - // this translates into Curl's CURLOPT_LOW_SPEED_TIME - // which says - // It contains the time in number seconds that the - // transfer speed should be below the CURLOPT_LOW_SPEED_LIMIT - // for the library to consider it too slow and abort. - http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout"))); - // maximum time an operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. 
- http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout"))); - // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 - http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - // Specify how many redirects should be allowed - http.maxRedirects(cfg.defaultMaxRedirects); - - // Do we enable curl debugging? - if (cfg.getValueBool("debug_https")) { - http.verbose = true; - .debugResponse = true; - - // Output what options we are using so that in the debug log this can be tracked - log.vdebug("http.dnsTimeout = ", cfg.getValueLong("dns_timeout")); - log.vdebug("http.connectTimeout = ", cfg.getValueLong("connect_timeout")); - log.vdebug("http.dataTimeout = ", cfg.getValueLong("data_timeout")); - log.vdebug("http.operationTimeout = ", cfg.getValueLong("operation_timeout")); - log.vdebug("http.CurlOption.ipresolve = ", cfg.getValueLong("ip_protocol_version")); - log.vdebug("http.maxRedirects = ", cfg.defaultMaxRedirects); - } - - // Update clientId if application_id is set in config file - if (cfg.getValueString("application_id") != "") { - // an application_id is set in config file - log.vdebug("Setting custom application_id to: " , cfg.getValueString("application_id")); - clientId = cfg.getValueString("application_id"); - companyName = "custom_application"; + + // Did the user specify a 'drive_id' ? 
+ if (!appConfig.getValueString("drive_id").empty) { + // Update base URL's + driveUrl = driveByIdUrl ~ appConfig.getValueString("drive_id"); + itemByIdUrl = driveUrl ~ "/items"; + itemByPathUrl = driveUrl ~ "/root:/"; } - - // Configure tenant id value, if 'azure_tenant_id' is configured, - // otherwise use the "common" multiplexer - string tenantId = "common"; - if (cfg.getValueString("azure_tenant_id") != "") { - // Use the value entered by the user - tenantId = cfg.getValueString("azure_tenant_id"); + + // Configure the authentication scope + if (appConfig.getValueBool("read_only_auth_scope")) { + // read-only authentication scopes has been requested + authScope = "&scope=Files.Read%20Files.Read.All%20Sites.Read.All%20offline_access&response_type=code&prompt=login&redirect_uri="; + } else { + // read-write authentication scopes will be used (default) + authScope = "&scope=Files.ReadWrite%20Files.ReadWrite.All%20Sites.ReadWrite.All%20offline_access&response_type=code&prompt=login&redirect_uri="; } - + // Configure Azure AD endpoints if 'azure_ad_endpoint' is configured - string azureConfigValue = cfg.getValueString("azure_ad_endpoint"); + string azureConfigValue = appConfig.getValueString("azure_ad_endpoint"); switch(azureConfigValue) { case "": if (tenantId == "common") { - log.log("Configuring Global Azure AD Endpoints"); + if (!appConfig.apiWasInitialised) log.log("Configuring Global Azure AD Endpoints"); } else { - log.log("Configuring Global Azure AD Endpoints - Single Tenant Application"); + if (!appConfig.apiWasInitialised) log.log("Configuring Global Azure AD Endpoints - Single Tenant Application"); } // Authentication - authUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; - tokenUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + authUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + redirectUrl = 
appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + tokenUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; break; case "USL4": - log.log("Configuring Azure AD for US Government Endpoints"); + if (!appConfig.apiWasInitialised) log.log("Configuring Azure AD for US Government Endpoints"); // Authentication - authUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default log.vdebug("USL4 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = usl4GraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = usl4GraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = usl4GraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = usl4GraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.usl4GraphEndpoint ~ 
"/v1.0/sites?search"; + siteDriveUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = usl4GraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/subscriptions"; break; case "USL5": - log.log("Configuring Azure AD for US Government Endpoints (DOD)"); + if (!appConfig.apiWasInitialised) log.log("Configuring Azure AD for US Government Endpoints (DOD)"); // Authentication - authUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default log.vdebug("USL5 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = usl5GraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = usl5GraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = 
appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = usl5GraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = usl5GraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = usl5GraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/subscriptions"; break; case "DE": - log.log("Configuring Azure AD Germany"); + if (!appConfig.apiWasInitialised) log.log("Configuring Azure AD Germany"); // Authentication - authUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default log.vdebug("DE AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = deGraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = deGraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.deGraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = deGraphEndpoint ~ 
"/v1.0/me/drive/items/"; - itemByPathUrl = deGraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = deGraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = deGraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.deGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.deGraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = deGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = deGraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.deGraphEndpoint ~ "/v1.0/subscriptions"; break; case "CN": - log.log("Configuring AD China operated by 21Vianet"); + if (!appConfig.apiWasInitialised) log.log("Configuring AD China operated by 21Vianet"); // Authentication - authUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default log.vdebug("CN AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = cnGraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = cnGraphEndpoint ~ 
"/v1.0/drives/"; + driveUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.cnGraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = cnGraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = cnGraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = cnGraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = cnGraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.cnGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.cnGraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = cnGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = cnGraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.cnGraphEndpoint ~ "/v1.0/subscriptions"; break; // Default - all other entries default: - log.log("Unknown Azure AD Endpoint request - using Global Azure AD Endpoints"); + if (!appConfig.apiWasInitialised) log.log("Unknown Azure AD Endpoint request - using Global Azure AD Endpoints"); } - + + // Has the application been authenticated? + if (!exists(appConfig.refreshTokenFilePath)) { + log.vdebug("Application has no 'refresh_token' thus needs to be authenticated"); + authorised = authorise(); + } else { + // Try and read the value from the appConfig if it is set, rather than trying to read the value from disk + if (!appConfig.refreshToken.empty) { + log.vdebug("Read token from appConfig"); + refreshToken = strip(appConfig.refreshToken); + authorised = true; + } else { + // Try and read the file from disk + try { + refreshToken = strip(readText(appConfig.refreshTokenFilePath)); + // is the refresh_token empty? 
+ if (refreshToken.empty) { + log.error("refreshToken exists but is empty: ", appConfig.refreshTokenFilePath); + authorised = authorise(); + } else { + // existing token not empty + authorised = true; + // update appConfig.refreshToken + appConfig.refreshToken = refreshToken; + } + } catch (FileException e) { + authorised = authorise(); + } catch (std.utf.UTFException e) { + // path contains characters which generate a UTF exception + log.error("Cannot read refreshToken from: ", appConfig.refreshTokenFilePath); + log.error(" Error Reason:", e.msg); + authorised = false; + } + } + + if (refreshToken.empty) { + // PROBLEM + writeln("refreshToken is empty !!!!!!!!!! will cause 4xx errors"); + } + } + // Return if we are authorised + log.vdebug("Authorised State: ", authorised); + return authorised; + } + + // If the API has been configured correctly, print the items that been configured + void debugOutputConfiguredAPIItems() { // Debug output of configured URL's + // Application Identification + log.vdebug("Configured clientId ", clientId); + log.vdebug("Configured userAgent ", appConfig.getValueString("user_agent")); // Authentication + log.vdebug("Configured authScope: ", authScope); log.vdebug("Configured authUrl: ", authUrl); log.vdebug("Configured redirectUrl: ", redirectUrl); log.vdebug("Configured tokenUrl: ", tokenUrl); - // Drive Queries log.vdebug("Configured driveUrl: ", driveUrl); log.vdebug("Configured driveByIdUrl: ", driveByIdUrl); - // Shared With Me log.vdebug("Configured sharedWithMeUrl: ", sharedWithMeUrl); - // Item Queries log.vdebug("Configured itemByIdUrl: ", itemByIdUrl); log.vdebug("Configured itemByPathUrl: ", itemByPathUrl); - // SharePoint Queries log.vdebug("Configured siteSearchUrl: ", siteSearchUrl); log.vdebug("Configured siteDriveUrl: ", siteDriveUrl); - - // Configure the User Agent string - if (cfg.getValueString("user_agent") == "") { - // Application User Agent string defaults - // Comply with OneDrive traffic decoration 
requirements - // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online - // - Identify as ISV and include Company Name, App Name separated by a pipe character and then adding Version number separated with a slash character - // Note: If you've created an application, the recommendation is to register and use AppID and AppTitle - // The issue here is that currently the application is still using the 'skilion' application ID, thus no idea what the AppTitle used was. - http.setUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ strip(import("version")); - } else { - // Use the value entered by the user - http.setUserAgent = cfg.getValueString("user_agent"); - } - - // What version of HTTP protocol do we use? - // Curl >= 7.62.0 defaults to http2 for a significant number of operations - if (cfg.getValueBool("force_http_11")) { - // Downgrade to curl to use HTTP 1.1 for all operations - log.vlog("Downgrading all HTTP operations to HTTP/1.1 due to user configuration"); - // Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1 - http.handle.set(CurlOption.http_version,2); - } else { - // Use curl defaults - log.vlog("Using Curl defaults for all HTTP operations"); - } - - // Configure upload / download rate limits if configured - long userRateLimit = cfg.getValueLong("rate_limit"); - // 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts - // A 0 value means rate is unlimited, and is the curl default - - if (userRateLimit > 0) { - // User configured rate limit - writeln("User Configured Rate Limit: ", userRateLimit); - - // If user provided rate limit is < 131072, flag that this is too low, setting to the minimum of 131072 - if (userRateLimit < 131072) { - // user provided limit too low - log.log("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. 
Overriding to default minimum of 131072 (128KB/s)"); - userRateLimit = 131072; - } - - // set rate limit - http.handle.set(CurlOption.max_send_speed_large,userRateLimit); - http.handle.set(CurlOption.max_recv_speed_large,userRateLimit); - } - - // Explicitly set libcurl options - // https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html - // Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals - http.handle.set(CurlOption.nosignal,0); - // https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html - // Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled - http.handle.set(CurlOption.tcp_nodelay,0); - // https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html - // Ensure that we ARE reusing connections - setting to 0 ensures that we are reusing connections - http.handle.set(CurlOption.forbid_reuse,0); - - // Do we set the dryRun handlers? - if (cfg.getValueBool("dry_run")) { - .dryRun = true; - if (cfg.getValueBool("logout")) { - .simulateNoRefreshTokenFile = true; - } - } - - subscriptionExpiration = Clock.currTime(UTC()); - subscriptionExpirationInterval = dur!"seconds"(cfg.getValueLong("webhook_expiration_interval")); - subscriptionRenewalInterval = dur!"seconds"(cfg.getValueLong("webhook_renewal_interval")); - notificationUrl = cfg.getValueString("webhook_public_url"); - } - - // Shutdown OneDrive HTTP construct - void shutdown() - { - // delete subscription if there exists any - deleteSubscription(); - - // reset any values to defaults, freeing any set objects - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // shut down the curl instance - http.shutdown(); } - - bool init() - { - static import std.utf; - // detail what we are using for applicaion identification - log.vdebug("clientId = ", clientId); - log.vdebug("companyName = ", companyName); - log.vdebug("appTitle = ", 
appTitle); - - try { - driveId = cfg.getValueString("drive_id"); - if (driveId.length) { - driveUrl = driveByIdUrl ~ driveId; - itemByIdUrl = driveUrl ~ "/items"; - itemByPathUrl = driveUrl ~ "/root:/"; - } - } catch (Exception e) {} - - if (!.dryRun) { - // original code - try { - refreshToken = readText(cfg.refreshTokenFilePath); - } catch (FileException e) { - try { - return authorize(); - } catch (CurlException e) { - log.error("Cannot authorize with Microsoft OneDrive Service"); - return false; - } - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath); - log.error(" Error Reason:", e.msg); - return false; - } - return true; - } else { - // --dry-run - if (!.simulateNoRefreshTokenFile) { - try { - refreshToken = readText(cfg.refreshTokenFilePath); - } catch (FileException e) { - return authorize(); - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath); - log.error(" Error Reason:", e.msg); - return false; - } - return true; - } else { - // --dry-run & --reauth - return authorize(); - } - } + + // Shutdown OneDrive API Curl Engine + void shutdown() { + // Delete subscription if there exists any + //deleteSubscription(); + + // Reset any values to defaults, freeing any set objects + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; + // Shut down the curl instance & close any open sockets + curlEngine.http.shutdown(); + // Free object and memory + object.destroy(curlEngine); } - - bool authorize() - { - import std.stdio, std.regex; + + // Authenticate this client against Microsoft OneDrive API + bool authorise() { + char[] response; - string authScope; - 
// What authentication scope to use? - if (cfg.getValueBool("read_only_auth_scope")) { - // read-only authentication scopes has been requested - authScope = "&scope=Files.Read%20Files.Read.All%20Sites.Read.All%20offline_access&response_type=code&prompt=login&redirect_uri="; - } else { - // read-write authentication scopes will be used (default) - authScope = "&scope=Files.ReadWrite%20Files.ReadWrite.All%20Sites.ReadWrite.All%20offline_access&response_type=code&prompt=login&redirect_uri="; - } - + // What URL should be presented to the user to access string url = authUrl ~ "?client_id=" ~ clientId ~ authScope ~ redirectUrl; - string authFilesString = cfg.getValueString("auth_files"); - string authResponseString = cfg.getValueString("auth_response"); - if (authResponseString != "") { + // Configure automated authentication if --auth-files authUrl:responseUrl is being used + string authFilesString = appConfig.getValueString("auth_files"); + string authResponseString = appConfig.getValueString("auth_response"); + + if (!authResponseString.empty) { + // read the response from authResponseString response = cast(char[]) authResponseString; } else if (authFilesString != "") { string[] authFiles = authFilesString.split(":"); @@ -588,22 +380,28 @@ final class OneDriveApi string responseUrl = authFiles[1]; try { - // Try and write out the auth URL to the nominated file auto authUrlFile = File(authUrl, "w"); authUrlFile.write(url); authUrlFile.close(); - } catch (std.exception.ErrnoException e) { + } catch (FileException e) { // There was a file system error // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return false; + exit(-1); + } catch (ErrnoException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + exit(-1); } + + log.log("Client requires authentication before proceeding. 
Waiting for --auth-files elements to be available."); while (!exists(responseUrl)) { Thread.sleep(dur!("msecs")(100)); } - // read response from OneDrive + // read response from provided from OneDrive try { response = cast(char[]) read(responseUrl); } catch (OneDriveException e) { @@ -621,314 +419,211 @@ final class OneDriveApi return false; } } else { - log.log("Authorize this app visiting:\n"); - write(url, "\n\n", "Enter the response uri: "); + log.log("Authorise this application by visiting:\n"); + write(url, "\n\n", "Enter the response uri from your browser: "); readln(response); - cfg.applicationAuthorizeResponseUri = true; + appConfig.applicationAuthorizeResponseUri = true; } // match the authorization code auto c = matchFirst(response, r"(?:[\?&]code=)([\w\d-.]+)"); if (c.empty) { - log.log("Invalid response uri entered"); + log.log("An empty or invalid response uri was entered"); return false; } c.popFront(); // skip the whole match redeemToken(c.front); + + return true; + } - string getSiteSearchUrl() - { - // Return the actual siteSearchUrl being used and/or requested when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call - return .siteSearchUrl; - } - - ulong getRetryAfterValue() - { - // Return the current value of retryAfterValue if it has been set to something other than 0 - return .retryAfterValue; - } - - void resetRetryAfterValue() - { - // Reset the current value of retryAfterValue to 0 after it has been used - .retryAfterValue = 0; - } - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_get - JSONValue getDefaultDrive() - { + JSONValue getDefaultDriveDetails() { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveUrl; return get(driveUrl); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getDefaultRoot() - { + JSONValue getDefaultRootDetails() { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveUrl ~ "/root"; 
return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getDriveIdRoot(const(char)[] driveId) - { + JSONValue getDriveIdRoot(string driveId) { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveByIdUrl ~ driveId ~ "/root"; return get(url); } - - // https://docs.microsoft.com/en-us/graph/api/drive-sharedwithme - JSONValue getSharedWithMe() - { - checkAccessTokenExpired(); - return get(sharedWithMeUrl); - } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_get - JSONValue getDriveQuota(const(char)[] driveId) - { + JSONValue getDriveQuota(string driveId) { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveByIdUrl ~ driveId ~ "/"; url ~= "?select=quota"; return get(url); } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delta - JSONValue viewChangesByItemId(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) - { + + // Return the details of the specified path, by giving the path we wish to query + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetails(string path) { checkAccessTokenExpired(); - const(char)[] url; - // configure deltaLink to query - if (deltaLink.empty) { - url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/delta"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + string url; + if ((path == ".")||(path == "/")) { + url = driveUrl ~ "/root/"; } else { - url = deltaLink; + url = itemByPathUrl ~ encodeComponent(path) ~ ":/"; } return get(url); } - + + // Return the details of the specified item based on its driveID and itemID + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetailsById(string driveId, string id) { + checkAccessTokenExpired(); + string url; + url = driveByIdUrl ~ driveId ~ "/items/" ~ id; + //url 
~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + return get(url); + } + + // Create a shareable link for an existing file on OneDrive based on the accessScope JSON permissions + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createlink + JSONValue createShareableLink(string driveId, string id, JSONValue accessScope) { + checkAccessTokenExpired(); + string url; + url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/createLink"; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, accessScope.toString()); + } + + // Return the requested details of the specified path on the specified drive id and path + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetailsByDriveId(string driveId, string path) { + checkAccessTokenExpired(); + string url; + // Required format: /drives/{drive-id}/root:/{item-path} + url = driveByIdUrl ~ driveId ~ "/root:/" ~ encodeComponent(path); + return get(url); + } + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delta - JSONValue viewChangesByDriveId(const(char)[] driveId, const(char)[] deltaLink) - { + JSONValue viewChangesByItemId(string driveId, string id, string deltaLink) { checkAccessTokenExpired(); - const(char)[] url = deltaLink; - if (url == null) { - url = driveByIdUrl ~ driveId ~ "/root/delta"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + + // If Business Account add addIncludeFeatureRequestHeader() which should add Prefer: Include-Feature=AddToOneDrive + if ((appConfig.accountType != "personal") && ( appConfig.getValueBool("sync_business_shared_items"))) { + addIncludeFeatureRequestHeader(); + } + + string url; + // configure deltaLink to query + if (deltaLink.empty) { + url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/delta"; + } else { + url = deltaLink; } 
return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_list_children - JSONValue listChildren(const(char)[] driveId, const(char)[] id, const(char)[] nextLink) - { + JSONValue listChildren(string driveId, string id, string nextLink) { checkAccessTokenExpired(); - const(char)[] url; + + // If Business Account add addIncludeFeatureRequestHeader() which should add Prefer: Include-Feature=AddToOneDrive + if ((appConfig.accountType != "personal") && ( appConfig.getValueBool("sync_business_shared_items"))) { + addIncludeFeatureRequestHeader(); + } + + string url; // configure URL to query if (nextLink.empty) { url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/children"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + //url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; } else { url = nextLink; } return get(url); } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content - void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) - { - checkAccessTokenExpired(); - scope(failure) { - if (exists(saveToPath)) { - // try and remove the file, catch error - try { - remove(saveToPath); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } - - // Create the required local directory - string newPath = dirName(saveToPath); - - // Does the path exist locally? 
- if (!exists(newPath)) { - try { - log.vdebug("Requested path does not exist, creating directory structure: ", newPath); - mkdirRecurse(newPath); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", newPath); - newPath.setAttributes(cfg.returnRequiredDirectoryPermisions()); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - - const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content?AVOverride=1"; - // Download file - download(url, saveToPath, fileSize); - // Does path exist? - if (exists(saveToPath)) { - // File was downloaded successfully - configure the applicable permissions for the file - log.vdebug("Setting file permissions for: ", saveToPath); - saveToPath.setAttributes(cfg.returnRequiredFilePermisions()); - } - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content - JSONValue simpleUpload(string localPath, string parentDriveId, string parentId, string filename, const(char)[] eTag = null) - { - checkAccessTokenExpired(); - string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/content"; - // TODO: investigate why this fails for remote folders - //if (eTag) http.addRequestHeader("If-Match", eTag); - /*else http.addRequestHeader("If-None-Match", "*");*/ - return upload(localPath, url); - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content - JSONValue simpleUploadReplace(string localPath, string driveId, string id, const(char)[] eTag = null) - { + + // https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_search + JSONValue searchDriveForPath(string driveId, string path) { checkAccessTokenExpired(); - string url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content"; - if (eTag) http.addRequestHeader("If-Match", eTag); - return upload(localPath, url); + 
string url; + url = "https://graph.microsoft.com/v1.0/drives/" ~ driveId ~ "/root/search(q='" ~ encodeComponent(path) ~ "')"; + return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_update - JSONValue updateById(const(char)[] driveId, const(char)[] id, JSONValue data, const(char)[] eTag = null) - { + JSONValue updateById(const(char)[] driveId, const(char)[] id, JSONValue data, const(char)[] eTag = null) { checkAccessTokenExpired(); const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - if (eTag) http.addRequestHeader("If-Match", eTag); - http.addRequestHeader("Content-Type", "application/json"); + if (eTag) curlEngine.http.addRequestHeader("If-Match", eTag); + curlEngine.http.addRequestHeader("Content-Type", "application/json"); return patch(url, data.toString()); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delete - void deleteById(const(char)[] driveId, const(char)[] id, const(char)[] eTag = null) - { + void deleteById(const(char)[] driveId, const(char)[] id, const(char)[] eTag = null) { checkAccessTokenExpired(); const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id; //TODO: investigate why this always fail with 412 (Precondition Failed) //if (eTag) http.addRequestHeader("If-Match", eTag); - del(url); + performDelete(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_post_children - JSONValue createById(const(char)[] parentDriveId, const(char)[] parentId, JSONValue item) - { + JSONValue createById(string parentDriveId, string parentId, JSONValue item) { checkAccessTokenExpired(); - const(char)[] url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ "/children"; - http.addRequestHeader("Content-Type", "application/json"); + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ "/children"; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); return post(url, item.toString()); } - - // 
Return the details of the specified path - JSONValue getPathDetails(const(string) path) - { - checkAccessTokenExpired(); - const(char)[] url; - if ((path == ".")||(path == "/")) url = driveUrl ~ "/root/"; - else url = itemByPathUrl ~ encodeComponent(path) ~ ":/"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the details of the specified id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getPathDetailsById(const(char)[] driveId, const(char)[] id) - { - checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified path on the specified drive id and path - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online - JSONValue getPathDetailsByDriveId(const(char)[] driveId, const(string) path) - { - checkAccessTokenExpired(); - const(char)[] url; - // string driveByIdUrl = "https://graph.microsoft.com/v1.0/drives/"; - // Required format: /drives/{drive-id}/root:/{item-path} - url = driveByIdUrl ~ driveId ~ "/root:/" ~ encodeComponent(path); - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified path on the specified drive id and item id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online - JSONValue getPathDetailsByDriveIdAndItemId(const(char)[] driveId, const(char)[] itemId) - { - checkAccessTokenExpired(); - const(char)[] url; - // string driveByIdUrl = "https://graph.microsoft.com/v1.0/drives/"; - // Required format: /drives/{drive-id}/items/{item-id} - url = driveByIdUrl ~ 
driveId ~ "/items/" ~ itemId; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getFileDetails(const(char)[] driveId, const(char)[] id) - { - checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - url ~= "?select=size,malware,file,webUrl,lastModifiedBy,lastModifiedDateTime"; - return get(url); - } - - // Create an anonymous read-only shareable link for an existing file on OneDrive - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createlink - JSONValue createShareableLink(const(char)[] driveId, const(char)[] id, JSONValue accessScope) - { + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content + JSONValue simpleUpload(string localPath, string parentDriveId, string parentId, string filename) { checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/createLink"; - http.addRequestHeader("Content-Type", "application/json"); - return post(url, accessScope.toString()); + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/content"; + return upload(localPath, url); } - - // https://dev.onedrive.com/items/move.htm - JSONValue moveByPath(const(char)[] sourcePath, JSONValue moveData) - { - // Need to use itemByPathUrl + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content + JSONValue simpleUploadReplace(string localPath, string driveId, string id) { checkAccessTokenExpired(); - string url = itemByPathUrl ~ encodeComponent(sourcePath); - http.addRequestHeader("Content-Type", "application/json"); - return move(url, moveData.toString()); + string url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ 
"/content"; + return upload(localPath, url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createuploadsession - JSONValue createUploadSession(const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null, JSONValue item = null) - { + //JSONValue createUploadSession(string parentDriveId, string parentId, string filename, string eTag = null, JSONValue item = null) { + JSONValue createUploadSession(string parentDriveId, string parentId, string filename, const(char)[] eTag = null, JSONValue item = null) { checkAccessTokenExpired(); - const(char)[] url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/createUploadSession"; - if (eTag) http.addRequestHeader("If-Match", eTag); - http.addRequestHeader("Content-Type", "application/json"); + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/createUploadSession"; + // eTag If-Match header addition commented out for the moment + // At some point, post the creation of this upload session the eTag is being 'updated' by OneDrive, thus when uploadFragment() is used + // this generates a 412 Precondition Failed and then a 416 Requested Range Not Satisfiable + // This needs to be investigated further as to why this occurs + //if (eTag) curlEngine.http.addRequestHeader("If-Match", eTag); + curlEngine.http.addRequestHeader("Content-Type", "application/json"); return post(url, item.toString()); } - + // https://dev.onedrive.com/items/upload_large_files.htm - JSONValue uploadFragment(const(char)[] uploadUrl, string filepath, long offset, long offsetSize, long fileSize) - { + JSONValue uploadFragment(string uploadUrl, string filepath, long offset, long offsetSize, long fileSize) { checkAccessTokenExpired(); // open file as read-only in binary mode + + // If we upload a modified file, with the current known online eTag, this gets changed when the session is 
started - thus, the tail end of uploading + // a fragment fails with a 412 Precondition Failed and then a 416 Requested Range Not Satisfiable + // For the moment, comment out adding the If-Match header in createUploadSession, which then avoids this issue + auto file = File(filepath, "rb"); file.seek(offset); string contentRange = "bytes " ~ to!string(offset) ~ "-" ~ to!string(offset + offsetSize - 1) ~ "/" ~ to!string(fileSize); @@ -936,12 +631,12 @@ final class OneDriveApi // function scopes scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; // close file if open if (file.isOpen()){ // close open file @@ -949,30 +644,21 @@ final class OneDriveApi } } - http.method = HTTP.Method.put; - http.url = uploadUrl; - http.addRequestHeader("Content-Range", contentRange); - http.onSend = data => file.rawRead(data).length; + curlEngine.http.method = HTTP.Method.put; + curlEngine.http.url = uploadUrl; + curlEngine.http.addRequestHeader("Content-Range", contentRange); + curlEngine.http.onSend = data => file.rawRead(data).length; // convert offsetSize to ulong - http.contentLength = to!ulong(offsetSize); - auto response = perform(); - // TODO: retry on 5xx errors - checkHttpCode(response); + curlEngine.http.contentLength = to!ulong(offsetSize); + auto response = performHTTPOperation(); + checkHttpResponseCode(response); return response; } - - // https://dev.onedrive.com/items/upload_large_files.htm - JSONValue requestUploadStatus(const(char)[] uploadUrl) - { - checkAccessTokenExpired(); - // when using microsoft graph the auth code is different - return get(uploadUrl, true); - } - + // 
https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/site_search?view=odsp-graph-online - JSONValue o365SiteSearch(const(char)[] nextLink){ + JSONValue o365SiteSearch(string nextLink) { checkAccessTokenExpired(); - const(char)[] url; + string url; // configure URL to query if (nextLink.empty) { url = siteSearchUrl ~ "=*"; @@ -981,153 +667,105 @@ final class OneDriveApi } return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_list?view=odsp-graph-online JSONValue o365SiteDrives(string site_id){ checkAccessTokenExpired(); - const(char)[] url; + string url; url = siteDriveUrl ~ site_id ~ "/drives"; return get(url); } - - // Create a new subscription or renew the existing subscription - void createOrRenewSubscription() { + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content + void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) { checkAccessTokenExpired(); - - // Kick off the webhook server first - if (webhook is null) { - webhook = OneDriveWebhook.getOrCreate( - cfg.getValueString("webhook_listening_host"), - to!ushort(cfg.getValueLong("webhook_listening_port")), - thisTid - ); - spawn(&OneDriveWebhook.serve); - } - - if (!hasValidSubscription()) { - createSubscription(); - } else if (isSubscriptionUpForRenewal()) { - try { - renewSubscription(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - log.log("The subscription is not found on the server. 
Recreating subscription ..."); - createSubscription(); + scope(failure) { + if (exists(saveToPath)) { + // try and remove the file, catch error + try { + remove(saveToPath); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } } - } - - private bool hasValidSubscription() { - return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC()); - } - private bool isSubscriptionUpForRenewal() { - return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval; - } - - private void createSubscription() { - log.log("Initializing subscription for updates ..."); + // Create the required local directory + string newPath = dirName(saveToPath); - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - const(char)[] url; - url = subscriptionUrl; - // Create a resource item based on if we have a driveId - string resourceItem; - if (driveId.length) { - resourceItem = "/drives/" ~ driveId ~ "/root"; - } else { - resourceItem = "/me/drive/root"; + // Does the path exist locally? 
+ if (!exists(newPath)) { + try { + log.vdebug("Requested path does not exist, creating directory structure: ", newPath); + mkdirRecurse(newPath); + // Configure the applicable permissions for the folder + log.vdebug("Setting directory permissions for: ", newPath); + newPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } } - - // create JSON request to create webhook subscription - const JSONValue request = [ - "changeType": "updated", - "notificationUrl": notificationUrl, - "resource": resourceItem, - "expirationDateTime": expirationDateTime.toISOExtString(), - "clientState": randomUUID().toString() - ]; - http.addRequestHeader("Content-Type", "application/json"); - JSONValue response; - try { - response = post(url, request.toString()); - } catch (OneDriveException e) { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // We need to exit here, user needs to fix issue - log.error("ERROR: Unable to initialize subscriptions for updates. Please fix this issue."); - shutdown(); - exit(-1); + const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content?AVOverride=1"; + // Download file + downloadFile(url, saveToPath, fileSize); + // Does path exist? 
+ if (exists(saveToPath)) { + // File was downloaded successfully - configure the applicable permissions for the file + log.vdebug("Setting file permissions for: ", saveToPath); + saveToPath.setAttributes(appConfig.returnRequiredFilePermisions()); } - - // Save important subscription metadata including id and expiration - subscriptionId = response["id"].str; - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); } - - private void renewSubscription() { - log.log("Renewing subscription for updates ..."); - - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - const(char)[] url; - url = subscriptionUrl ~ "/" ~ subscriptionId; - const JSONValue request = [ - "expirationDateTime": expirationDateTime.toISOExtString() - ]; - http.addRequestHeader("Content-Type", "application/json"); - JSONValue response = patch(url, request.toString()); - - // Update subscription expiration from the response - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + + // Return the actual siteSearchUrl being used and/or requested when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call + string getSiteSearchUrl() { + return siteSearchUrl; + } + + // Return the current value of retryAfterValue + ulong getRetryAfterValue() { + return retryAfterValue; } - private void deleteSubscription() { - if (!hasValidSubscription()) { - return; - } - - const(char)[] url; - url = subscriptionUrl ~ "/" ~ subscriptionId; - del(url); - log.log("Deleted subscription"); + // Reset the current value of retryAfterValue to 0 after it has been used + void resetRetryAfterValue() { + retryAfterValue = 0; } - - private void redeemToken(const(char)[] authCode) - { - const(char)[] postData = - "client_id=" ~ clientId ~ - "&redirect_uri=" ~ redirectUrl ~ - "&code=" ~ authCode ~ - "&grant_type=authorization_code"; - acquireToken(postData); + + private void addAccessTokenHeader() { + 
curlEngine.http.addRequestHeader("Authorization", appConfig.accessToken); } - - private void newToken() - { - string postData = - "client_id=" ~ clientId ~ - "&redirect_uri=" ~ redirectUrl ~ - "&refresh_token=" ~ refreshToken ~ - "&grant_type=refresh_token"; - acquireToken(postData); + + private void addIncludeFeatureRequestHeader() { + log.vdebug("Adding 'Include-Feature=AddToOneDrive' API request header as 'sync_business_shared_items' config option is enabled"); + curlEngine.http.addRequestHeader("Prefer", "Include-Feature=AddToOneDrive"); } - - private void acquireToken(const(char)[] postData) - { + + private void acquireToken(char[] postData) { JSONValue response; try { response = post(tokenUrl, postData); } catch (OneDriveException e) { // an error was generated - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + if ((e.httpStatusCode == 400) || (e.httpStatusCode == 401)) { + // Handle an unauthorised client + handleClientUnauthorised(e.httpStatusCode, e.msg); + } else { + if (e.httpStatusCode >= 500) { + // There was a HTTP 5xx Server Side Error - retry + acquireToken(postData); + } else { + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } + } } if (response.type() == JSONType.object) { // Has the client been configured to use read_only_auth_scope - if (cfg.getValueBool("read_only_auth_scope")) { + if (appConfig.getValueBool("read_only_auth_scope")) { // read_only_auth_scope has been configured if ("scope" in response){ string effectiveScopes = response["scope"].str(); @@ -1150,36 +788,64 @@ final class OneDriveApi } if ("access_token" in response){ - accessToken = "bearer " ~ response["access_token"].str(); - refreshToken = response["refresh_token"].str(); - accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer()); - if (!.dryRun) { + appConfig.accessToken = "bearer " ~ strip(response["access_token"].str); + + // Do we print the current access token + if (log.verbose > 1) { + if 
(appConfig.getValueBool("debug_https")) { + if (appConfig.getValueBool("print_token")) { + // This needs to be highly restricted in output .... + log.vdebug("CAUTION - KEEP THIS SAFE: Current access token: ", appConfig.accessToken); + } + } + } + + refreshToken = strip(response["refresh_token"].str); + appConfig.accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer()); + if (!dryRun) { + // Update the refreshToken in appConfig so that we can reuse it + if (appConfig.refreshToken.empty) { + // The access token is empty + log.vdebug("Updating appConfig.refreshToken with new refreshToken as appConfig.refreshToken is empty"); + appConfig.refreshToken = refreshToken; + } else { + // Is the access token different? + if (appConfig.refreshToken != refreshToken) { + // Update the memory version + log.vdebug("Updating appConfig.refreshToken with updated refreshToken"); + appConfig.refreshToken = refreshToken; + } + } + + // try and update the refresh_token file on disk try { - // try and update the refresh_token file - std.file.write(cfg.refreshTokenFilePath, refreshToken); - log.vdebug("Setting file permissions for: ", cfg.refreshTokenFilePath); - cfg.refreshTokenFilePath.setAttributes(cfg.returnRequiredFilePermisions()); + log.vdebug("Updating refreshToken on disk"); + std.file.write(appConfig.refreshTokenFilePath, refreshToken); + log.vdebug("Setting file permissions for: ", appConfig.refreshTokenFilePath); + appConfig.refreshTokenFilePath.setAttributes(appConfig.returnRequiredFilePermisions()); } catch (FileException e) { // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } - if (printAccessToken) writeln("New access token: ", accessToken); } else { log.error("\nInvalid authentication response from OneDrive. 
Please check the response uri\n"); // re-authorize - authorize(); + authorise(); } } else { - log.vdebug("Invalid JSON response from OneDrive unable to initialize application"); + log.log("Invalid response from the OneDrive API. Unable to initialise OneDrive API instance."); + exit(-1); } } - - private void checkAccessTokenExpired() - { + + private void checkAccessTokenExpired() { try { - if (Clock.currTime() >= accessTokenExpiration) { + if (Clock.currTime() >= appConfig.accessTokenExpiration) { + log.vdebug("Microsoft OneDrive Access Token has EXPIRED. Must generate a new Microsoft OneDrive Access Token"); newToken(); + } else { + log.vdebug("Existing Microsoft OneDrive Access Token Expires: ", appConfig.accessTokenExpiration); } } catch (OneDriveException e) { if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { @@ -1192,41 +858,17 @@ final class OneDriveApi } } } - - private void addAccessTokenHeader() - { - http.addRequestHeader("Authorization", accessToken); - } - - private JSONValue get(const(char)[] url, bool skipToken = false) - { - scope(exit) http.clearRequestHeaders(); - log.vdebug("Request URL = ", url); - http.method = HTTP.Method.get; - http.url = url; - if (!skipToken) addAccessTokenHeader(); // HACK: requestUploadStatus - JSONValue response; - response = perform(); - checkHttpCode(response); - // OneDrive API Response Debugging if --https-debug is being used - if (.debugResponse){ - log.vdebug("OneDrive API Response: ", response); - } - return response; - } - - private void del(const(char)[] url) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.del; - http.url = url; + + private void performDelete(const(char)[] url) { + scope(exit) curlEngine.http.clearRequestHeaders(); + curlEngine.http.method = HTTP.Method.del; + curlEngine.http.url = url; addAccessTokenHeader(); - auto response = perform(); - checkHttpCode(response); + auto response = performHTTPOperation(); + checkHttpResponseCode(response); } - - private void 
download(const(char)[] url, string filename, long fileSize) - { + + private void downloadFile(const(char)[] url, string filename, long fileSize) { // Threshold for displaying download bar long thresholdFileSize = 4 * 2^^20; // 4 MiB @@ -1239,14 +881,14 @@ final class OneDriveApi // function scopes scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; // Reset onProgress to not display anything for next download - http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) + curlEngine.http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) { return 0; }; @@ -1257,11 +899,11 @@ final class OneDriveApi } } - http.method = HTTP.Method.get; - http.url = url; + curlEngine.http.method = HTTP.Method.get; + curlEngine.http.url = url; addAccessTokenHeader(); - http.onReceive = (ubyte[] data) { + curlEngine.http.onReceive = (ubyte[] data) { file.rawWrite(data); return data.length; }; @@ -1277,7 +919,7 @@ final class OneDriveApi real percentCheck = 5.0; long segmentCount = 1; // Setup progress bar to display - http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) + curlEngine.http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) { // For each onProgress, what is the % of dlnow to dltotal // floor - rounds down to nearest whole number @@ -1303,13 +945,13 @@ final class OneDriveApi // Expected Total = 52428800 // Percent Complete = 26 - if (cfg.getValueLong("rate_limit") > 0) { + if (appConfig.getValueLong("rate_limit") > 0) { // User configured rate limit // How much data should be 
in each segment to qualify for 5% - long dataPerSegment = to!long(floor(double(dltotal)/iteration)); + ulong dataPerSegment = to!ulong(floor(double(dltotal)/iteration)); // How much data received do we need to validate against - long thisSegmentData = dataPerSegment * segmentCount; - long nextSegmentData = dataPerSegment * (segmentCount + 1); + ulong thisSegmentData = dataPerSegment * segmentCount; + ulong nextSegmentData = dataPerSegment * (segmentCount + 1); // Has the data that has been received in a 5% window that we need to increment the progress bar at if ((dlnow > thisSegmentData) && (dlnow < nextSegmentData) && (previousProgressPercent != currentDLPercent) || (dlnow == dltotal)) { // Downloaded data equals approx 5% @@ -1349,7 +991,7 @@ final class OneDriveApi // Perform download & display progress bar try { // try and catch any curl error - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling // check will be performed in checkHttpCode() writeln(); @@ -1363,7 +1005,7 @@ final class OneDriveApi // No progress bar try { // try and catch any curl error - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling // check will be performed in checkHttpCode() } catch (CurlException e) { @@ -1378,81 +1020,59 @@ final class OneDriveApi checkHttpCode(); } - private auto patch(T)(const(char)[] url, const(T)[] patchData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.patch; - http.url = url; - addAccessTokenHeader(); - auto response = perform(patchData); - checkHttpCode(response); + private JSONValue get(string url, bool skipToken = false) { + scope(exit) curlEngine.http.clearRequestHeaders(); + log.vdebug("Request URL = ", url); + curlEngine.http.method = HTTP.Method.get; + curlEngine.http.url = url; + if (!skipToken) addAccessTokenHeader(); // HACK: requestUploadStatus + JSONValue response; + response = 
performHTTPOperation(); + checkHttpResponseCode(response); + // OneDrive API Response Debugging if --https-debug is being used + if (debugResponse){ + log.vdebug("OneDrive API Response: ", response); + } return response; } - - private auto post(T)(const(char)[] url, const(T)[] postData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.post; - http.url = url; - addAccessTokenHeader(); - auto response = perform(postData); - checkHttpCode(response); - return response; + + private void newToken() { + log.vdebug("Need to generate a new access token for Microsoft OneDrive"); + string postData = + "client_id=" ~ clientId ~ + "&redirect_uri=" ~ redirectUrl ~ + "&refresh_token=" ~ refreshToken ~ + "&grant_type=refresh_token"; + char[] strArr = postData.dup; + acquireToken(strArr); } - - private auto move(T)(const(char)[] url, const(T)[] postData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.patch; - http.url = url; + + private auto patch(T)(const(char)[] url, const(T)[] patchData) { + curlEngine.setMethodPatch(); + curlEngine.http.url = url; addAccessTokenHeader(); - auto response = perform(postData); - // Check the HTTP response code, which, if a 429, will also check response headers - checkHttpCode(); + auto response = perform(patchData); + checkHttpResponseCode(response); return response; } - - private JSONValue upload(string filepath, string url) - { - checkAccessTokenExpired(); - // open file as read-only in binary mode - auto file = File(filepath, "rb"); - - // function scopes - scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // close file if open - if (file.isOpen()){ - // close open file - file.close(); - } - } - - http.method = HTTP.Method.put; - http.url = url; + + private auto post(T)(string url, const(T)[] postData) { + curlEngine.setMethodPost(); + curlEngine.http.url = 
url; addAccessTokenHeader(); - http.addRequestHeader("Content-Type", "application/octet-stream"); - http.onSend = data => file.rawRead(data).length; - http.contentLength = file.size; - auto response = perform(); - checkHttpCode(response); + auto response = perform(postData); + checkHttpResponseCode(response); return response; } - - private JSONValue perform(const(void)[] sendData) - { + + private JSONValue perform(const(void)[] sendData) { scope(exit) { - http.onSend = null; - http.contentLength = 0; + curlEngine.http.onSend = null; + curlEngine.http.contentLength = 0; } if (sendData) { - http.contentLength = sendData.length; - http.onSend = (void[] buf) { + curlEngine.http.contentLength = sendData.length; + curlEngine.http.onSend = (void[] buf) { import std.algorithm: min; size_t minLen = min(buf.length, sendData.length); if (minLen == 0) return 0; @@ -1461,34 +1081,33 @@ final class OneDriveApi return minLen; }; } else { - http.onSend = buf => 0; + curlEngine.http.onSend = buf => 0; } - auto response = perform(); + auto response = performHTTPOperation(); return response; } - - private JSONValue perform() - { - scope(exit) http.onReceive = null; + + private JSONValue performHTTPOperation() { + scope(exit) curlEngine.http.onReceive = null; char[] content; JSONValue json; - http.onReceive = (ubyte[] data) { + curlEngine.http.onReceive = (ubyte[] data) { content ~= data; // HTTP Server Response Code Debugging if --https-debug is being used - if (.debugResponse){ - log.vdebug("onedrive.perform() => OneDrive HTTP Server Response: ", http.statusLine.code); + if (debugResponse){ + log.vdebug("onedrive.performHTTPOperation() => OneDrive HTTP Server Response: ", curlEngine.http.statusLine.code); } return data.length; }; try { - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling checkHTTPResponseHeaders(); } catch (CurlException e) { // Parse and display error message received from OneDrive - 
log.vdebug("onedrive.perform() Generated a OneDrive CurlException"); + log.vdebug("onedrive.performHTTPOperation() Generated a OneDrive CurlException"); auto errorArray = splitLines(e.msg); string errorMessage = errorArray[0]; @@ -1515,16 +1134,16 @@ final class OneDriveApi try { // configure libcurl to perform a fresh connection log.vdebug("Configuring libcurl to use a fresh connection for re-try"); - http.handle.set(CurlOption.fresh_connect,1); + curlEngine.http.handle.set(CurlOption.fresh_connect,1); // try the access - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling checkHTTPResponseHeaders(); // no error from http.perform() on re-try log.log("Internet connectivity to Microsoft OneDrive service has been restored"); // unset the fresh connect option as this then creates performance issues if left enabled log.vdebug("Unsetting libcurl to use a fresh connection as this causes a performance impact if left enabled"); - http.handle.set(CurlOption.fresh_connect,0); + curlEngine.http.handle.set(CurlOption.fresh_connect,0); // connectivity restored retrySuccess = true; } catch (CurlException e) { @@ -1580,20 +1199,40 @@ final class OneDriveApi throw new OneDriveException(408, "Request Timeout - HTTP 408 or Internet down?"); } } else { - // Log that an error was returned - log.error("ERROR: OneDrive returned an error with the following message:"); - // Some other error was returned - log.error(" Error Message: ", errorMessage); - log.error(" Calling Function: ", getFunctionName!({})); + + // what error was returned? + if (canFind(errorMessage, "Problem with the SSL CA cert (path? access rights?) 
on handle")) { + // error setting certificate verify locations: + // CAfile: /etc/pki/tls/certs/ca-bundle.crt + // CApath: none + // + // Tell the Curl Engine to bypass SSL check - essentially SSL is passing back a bad value due to 'stdio' compile time option + // Further reading: + // https://github.com/curl/curl/issues/6090 + // https://github.com/openssl/openssl/issues/7536 + // https://stackoverflow.com/questions/45829588/brew-install-fails-curl77-error-setting-certificate-verify + // https://forum.dlang.org/post/vwvkbubufexgeuaxhqfl@forum.dlang.org + + log.vdebug("Problem with reading the SSL CA cert via libcurl - attempting work around"); + curlEngine.setDisableSSLVerifyPeer(); + // retry origional call + performHTTPOperation(); + } else { + // Log that an error was returned + log.error("ERROR: OneDrive returned an error with the following message:"); + // Some other error was returned + log.error(" Error Message: ", errorMessage); + log.error(" Calling Function: ", getFunctionName!({})); - // Was this a curl initialization error? - if (canFind(errorMessage, "Failed initialization on handle")) { - // initialization error ... prevent a run-away process if we have zero disk space - ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); - if (localActualFreeSpace == 0) { - // force exit - shutdown(); - exit(-1); + // Was this a curl initialization error? + if (canFind(errorMessage, "Failed initialization on handle")) { + // initialization error ... 
prevent a run-away process if we have zero disk space + ulong localActualFreeSpace = getAvailableDiskSpace("."); + if (localActualFreeSpace == 0) { + // force exit + shutdown(); + exit(-1); + } } } } @@ -1609,26 +1248,135 @@ final class OneDriveApi } return json; } + + private void redeemToken(char[] authCode){ + char[] postData = + "client_id=" ~ clientId ~ + "&redirect_uri=" ~ redirectUrl ~ + "&code=" ~ authCode ~ + "&grant_type=authorization_code"; + acquireToken(postData); + } + + private JSONValue upload(string filepath, string url) { + checkAccessTokenExpired(); + // open file as read-only in binary mode + auto file = File(filepath, "rb"); + + // function scopes + scope(exit) { + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; + // close file if open + if (file.isOpen()){ + // close open file + file.close(); + } + } - private void checkHTTPResponseHeaders() - { + curlEngine.http.method = HTTP.Method.put; + curlEngine.http.url = url; + addAccessTokenHeader(); + curlEngine.http.addRequestHeader("Content-Type", "application/octet-stream"); + curlEngine.http.onSend = data => file.rawRead(data).length; + curlEngine.http.contentLength = file.size; + auto response = performHTTPOperation(); + checkHttpResponseCode(response); + return response; + } + + private void checkHTTPResponseHeaders() { // Get the HTTP Response headers - needed for correct 429 handling - auto responseHeaders = http.responseHeaders(); - if (.debugResponse){ - log.vdebug("http.perform() => HTTP Response Headers: ", responseHeaders); + auto responseHeaders = curlEngine.http.responseHeaders(); + if (debugResponse){ + log.vdebug("curlEngine.http.perform() => HTTP Response Headers: ", responseHeaders); } // is retry-after in the response headers - if ("retry-after" in http.responseHeaders) { + if ("retry-after" in 
curlEngine.http.responseHeaders) { // Set the retry-after value - log.vdebug("http.perform() => Received a 'Retry-After' Header Response with the following value: ", http.responseHeaders["retry-after"]); - log.vdebug("http.perform() => Setting retryAfterValue to: ", http.responseHeaders["retry-after"]); - .retryAfterValue = to!ulong(http.responseHeaders["retry-after"]); + log.vdebug("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: ", curlEngine.http.responseHeaders["retry-after"]); + log.vdebug("curlEngine.http.perform() => Setting retryAfterValue to: ", curlEngine.http.responseHeaders["retry-after"]); + retryAfterValue = to!ulong(curlEngine.http.responseHeaders["retry-after"]); } } - private void checkHttpCode() - { + private void checkHttpResponseCode(JSONValue response) { + switch(curlEngine.http.statusLine.code) { + // 0 - OK ... HTTP2 version of 200 OK + case 0: + break; + // 100 - Continue + case 100: + break; + // 200 - OK + case 200: + // No Log .. + break; + // 201 - Created OK + // 202 - Accepted + // 204 - Deleted OK + case 201,202,204: + // No actions, but log if verbose logging + //log.vlog("OneDrive Response: '", curlEngine.http.statusLine.code, " - ", curlEngine.http.statusLine.reason, "'"); + break; + + // 302 - resource found and available at another location, redirect + case 302: + break; + + // 400 - Bad Request + case 400: + // Bad Request .. how should we act? 
+ // make sure this is thrown so that it is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // 403 - Forbidden + case 403: + // OneDrive responded that the user is forbidden + log.vlog("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error"); + // Throw this as a specific exception so this is caught when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // 412 - Precondition Failed + case 412: + // Throw this as a specific exception so this is caught when performing sync.uploadLastModifiedTime + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // Server side (OneDrive) Errors + // 500 - Internal Server Error + // 502 - Bad Gateway + // 503 - Service Unavailable + // 504 - Gateway Timeout (Issue #320) + case 500: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + case 502: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + case 503: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + case 504: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // Default - all other errors that are not a 2xx or a 302 + default: + if (curlEngine.http.statusLine.code / 100 != 2 && curlEngine.http.statusLine.code != 302) { + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + } + } + } 
+ + private void checkHttpCode() { // https://dev.onedrive.com/misc/errors.htm // https://developer.overdrive.com/docs/reference-guide @@ -1673,7 +1421,7 @@ final class OneDriveApi */ - switch(http.statusLine.code) + switch(curlEngine.http.statusLine.code) { // 0 - OK ... HTTP2 version of 200 OK case 0: @@ -1746,7 +1494,7 @@ final class OneDriveApi checkHTTPResponseHeaders(); // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online log.vlog("OneDrive returned a 'HTTP 429 - Too Many Requests' - gracefully handling error"); - throw new OneDriveException(http.statusLine.code, http.statusLine.reason); + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason); // Server side (OneDrive) Errors // 500 - Internal Server Error @@ -1775,113 +1523,7 @@ final class OneDriveApi // "else" default: - throw new OneDriveException(http.statusLine.code, http.statusLine.reason); - } - } - - private void checkHttpCode(ref const JSONValue response) - { - switch(http.statusLine.code) - { - // 0 - OK ... HTTP2 version of 200 OK - case 0: - break; - // 100 - Continue - case 100: - break; - // 200 - OK - case 200: - // No Log .. - break; - // 201 - Created OK - // 202 - Accepted - // 204 - Deleted OK - case 201,202,204: - // No actions, but log if verbose logging - //log.vlog("OneDrive Response: '", http.statusLine.code, " - ", http.statusLine.reason, "'"); - break; - - // 302 - resource found and available at another location, redirect - case 302: - break; - - // 400 - Bad Request - case 400: - // Bad Request .. how should we act? 
- // make sure this is thrown so that it is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // 403 - Forbidden - case 403: - // OneDrive responded that the user is forbidden - log.vlog("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error"); - // Throw this as a specific exception so this is caught when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // 412 - Precondition Failed - case 412: - // Throw this as a specific exception so this is caught when performing sync.uploadLastModifiedTime - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // Server side (OneDrive) Errors - // 500 - Internal Server Error - // 502 - Bad Gateway - // 503 - Service Unavailable - // 504 - Gateway Timeout (Issue #320) - case 500: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 502: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 503: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 504: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // Default - all other errors that are not a 2xx or a 302 - default: - if (http.statusLine.code / 100 != 2 && http.statusLine.code != 302) { - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - } + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason); } } -} - -unittest -{ - string configDirName = expandTilde("~/.config/onedrive"); - auto cfg = new 
config.Config(configDirName); - cfg.init(); - OneDriveApi onedrive = new OneDriveApi(cfg); - onedrive.init(); - std.file.write("/tmp/test", "test"); - - // simpleUpload - auto item = onedrive.simpleUpload("/tmp/test", "/test"); - try { - item = onedrive.simpleUpload("/tmp/test", "/test"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 409); - } - try { - item = onedrive.simpleUpload("/tmp/test", "/test", "123"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 412); - } - item = onedrive.simpleUpload("/tmp/test", "/test", item["eTag"].str); - - // deleteById - try { - onedrive.deleteById(item["id"].str, "123"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 412); - } - onedrive.deleteById(item["id"].str, item["eTag"].str); - onedrive.http.shutdown(); -} +} \ No newline at end of file diff --git a/src/progress.d b/src/progress.d index 9277ae121..03d5481a8 100644 --- a/src/progress.d +++ b/src/progress.d @@ -1,5 +1,7 @@ +// What is this module called? module progress; +// What does this module require to function? import std.stdio; import std.range; import std.format; @@ -7,6 +9,8 @@ import std.datetime; import core.sys.posix.unistd; import core.sys.posix.sys.ioctl; +// What other modules that we have created do we need to import? + class Progress { private: diff --git a/src/qxor.d b/src/qxor.d index 63e8f0f5e..64de204f7 100644 --- a/src/qxor.d +++ b/src/qxor.d @@ -1,7 +1,11 @@ +// What is this module called? +module qxor; + +// What does this module require to function? 
import std.algorithm; import std.digest; -// implementation of the QuickXorHash algorithm in D +// Implementation of the QuickXorHash algorithm in D // https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md struct QuickXor { @@ -71,18 +75,4 @@ struct QuickXor } return tmp; } -} - -unittest -{ - assert(isDigest!QuickXor); -} - -unittest -{ - QuickXor qxor; - qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog"); - assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE"); -} - -alias QuickXorDigest = WrapperDigest!(QuickXor); +} \ No newline at end of file diff --git a/src/selective.d b/src/selective.d deleted file mode 100644 index 55be94eb7..000000000 --- a/src/selective.d +++ /dev/null @@ -1,422 +0,0 @@ -import std.algorithm; -import std.array; -import std.file; -import std.path; -import std.regex; -import std.stdio; -import std.string; -import util; -import log; - -final class SelectiveSync -{ - private string[] paths; - private string[] businessSharedFoldersList; - private Regex!char mask; - private Regex!char dirmask; - private bool skipDirStrictMatch = false; - private bool skipDotfiles = false; - - // load sync_list file - void load(string filepath) - { - if (exists(filepath)) { - // open file as read only - auto file = File(filepath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - paths ~= buildNormalizedPath(line); - } - file.close(); - } - } - - // Configure skipDirStrictMatch if function is called - // By default, skipDirStrictMatch = false; - void setSkipDirStrictMatch() - { - skipDirStrictMatch = true; - } - - // load business_shared_folders file - void loadSharedFolders(string filepath) - { - if (exists(filepath)) { - // open file as read only - auto file = File(filepath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if 
(line.length == 0 || line[0] == ';' || line[0] == '#') continue; - businessSharedFoldersList ~= buildNormalizedPath(line); - } - file.close(); - } - } - - void setFileMask(const(char)[] mask) - { - this.mask = wild2regex(mask); - } - - void setDirMask(const(char)[] dirmask) - { - this.dirmask = wild2regex(dirmask); - } - - // Configure skipDotfiles if function is called - // By default, skipDotfiles = false; - void setSkipDotfiles() - { - skipDotfiles = true; - } - - // return value of skipDotfiles - bool getSkipDotfiles() - { - return skipDotfiles; - } - - // config file skip_dir parameter - bool isDirNameExcluded(string name) - { - // Does the directory name match skip_dir config entry? - // Returns true if the name matches a skip_dir config entry - // Returns false if no match - log.vdebug("skip_dir evaluation for: ", name); - - // Try full path match first - if (!name.matchFirst(dirmask).empty) { - log.vdebug("'!name.matchFirst(dirmask).empty' returned true = matched"); - return true; - } else { - // Do we check the base name as well? - if (!skipDirStrictMatch) { - log.vdebug("No Strict Matching Enforced"); - - // Test the entire path working backwards from child - string path = buildNormalizedPath(name); - string checkPath; - auto paths = pathSplitter(path); - - foreach_reverse(directory; paths) { - if (directory != "/") { - // This will add a leading '/' but that needs to be stripped to check - checkPath = "/" ~ directory ~ checkPath; - if(!checkPath.strip('/').matchFirst(dirmask).empty) { - log.vdebug("'!checkPath.matchFirst(dirmask).empty' returned true = matched"); - return true; - } - } - } - } else { - log.vdebug("Strict Matching Enforced - No Match"); - } - } - // no match - return false; - } - - // config file skip_file parameter - bool isFileNameExcluded(string name) - { - // Does the file name match skip_file config entry? 
- // Returns true if the name matches a skip_file config entry - // Returns false if no match - log.vdebug("skip_file evaluation for: ", name); - - // Try full path match first - if (!name.matchFirst(mask).empty) { - return true; - } else { - // check just the file name - string filename = baseName(name); - if(!filename.matchFirst(mask).empty) { - return true; - } - } - // no match - return false; - } - - // Match against sync_list only - bool isPathExcludedViaSyncList(string path) - { - // Debug output that we are performing a 'sync_list' inclusion / exclusion test - return .isPathExcluded(path, paths); - } - - // Match against skip_dir, skip_file & sync_list entries - bool isPathExcludedMatchAll(string path) - { - return .isPathExcluded(path, paths) || .isPathMatched(path, mask) || .isPathMatched(path, dirmask); - } - - // is the path a dotfile? - bool isDotFile(string path) - { - // always allow the root - if (path == ".") return false; - - path = buildNormalizedPath(path); - auto paths = pathSplitter(path); - foreach(base; paths) { - if (startsWith(base, ".")){ - return true; - } - } - return false; - } - - // is business shared folder matched - bool isSharedFolderMatched(string name) - { - // if there are no shared folder always return false - if (businessSharedFoldersList.empty) return false; - - if (!name.matchFirst(businessSharedFoldersList).empty) { - return true; - } else { - // try a direct comparison just in case - foreach (userFolder; businessSharedFoldersList) { - if (userFolder == name) { - // direct match - log.vdebug("'matchFirst' failed to match, however direct comparison was matched: ", name); - return true; - } - } - return false; - } - } - - // is business shared folder included - bool isPathIncluded(string path, string[] allowedPaths) - { - // always allow the root - if (path == ".") return true; - // if there are no allowed paths always return true - if (allowedPaths.empty) return true; - - path = buildNormalizedPath(path); - foreach 
(allowed; allowedPaths) { - auto comm = commonPrefix(path, allowed); - if (comm.length == path.length) { - // the given path is contained in an allowed path - return true; - } - if (comm.length == allowed.length && path[comm.length] == '/') { - // the given path is a subitem of an allowed path - return true; - } - } - return false; - } -} - -// test if the given path is not included in the allowed paths -// if there are no allowed paths always return false -private bool isPathExcluded(string path, string[] allowedPaths) -{ - // function variables - bool exclude = false; - bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry - bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry - bool finalResult = true; // will get updated to false, if pattern match to sync_list entry - int offset; - string wildcard = "*"; - - // always allow the root - if (path == ".") return false; - // if there are no allowed paths always return false - if (allowedPaths.empty) return false; - path = buildNormalizedPath(path); - log.vdebug("Evaluation against 'sync_list' for this path: ", path); - log.vdebug("[S]exclude = ", exclude); - log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch); - log.vdebug("[S]excludeMatched = ", excludeMatched); - - // unless path is an exact match, entire sync_list entries need to be processed to ensure - // negative matches are also correctly detected - foreach (allowedPath; allowedPaths) { - // is this an inclusion path or finer grained exclusion? 
- switch (allowedPath[0]) { - case '-': - // sync_list path starts with '-', this user wants to exclude this path - exclude = true; - // If the sync_list entry starts with '-/' offset needs to be 2, else 1 - if (startsWith(allowedPath, "-/")){ - // Offset needs to be 2 - offset = 2; - } else { - // Offset needs to be 1 - offset = 1; - } - break; - case '!': - // sync_list path starts with '!', this user wants to exclude this path - exclude = true; - // If the sync_list entry starts with '!/' offset needs to be 2, else 1 - if (startsWith(allowedPath, "!/")){ - // Offset needs to be 2 - offset = 2; - } else { - // Offset needs to be 1 - offset = 1; - } - break; - case '/': - // sync_list path starts with '/', this user wants to include this path - // but a '/' at the start causes matching issues, so use the offset for comparison - exclude = false; - offset = 1; - break; - - default: - // no negative pattern, default is to not exclude - exclude = false; - offset = 0; - } - - // What are we comparing against? - log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath); - - // Generate the common prefix from the path vs the allowed path - auto comm = commonPrefix(path, allowedPath[offset..$]); - - // Is path is an exact match of the allowed path? 
- if (comm.length == path.length) { - // we have a potential exact match - // strip any potential '/*' from the allowed path, to avoid a potential lesser common match - string strippedAllowedPath = strip(allowedPath[offset..$], "/*"); - - if (path == strippedAllowedPath) { - // we have an exact path match - log.vdebug("exact path match"); - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: direct match"); - finalResult = false; - // direct match, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded"); - // do not set excludeMatched = true here, otherwise parental path also gets excluded - // flag exludeDirectMatch so that a 'wildcard match' will not override this exclude - exludeDirectMatch = true; - // final result - finalResult = true; - } - } else { - // no exact path match, but something common does match - log.vdebug("something 'common' matches the input path"); - auto splitAllowedPaths = pathSplitter(strippedAllowedPath); - string pathToEvaluate = ""; - foreach(base; splitAllowedPaths) { - pathToEvaluate ~= base; - if (path == pathToEvaluate) { - // The input path matches what we want to evaluate against as a direct match - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item"); - finalResult = false; - // direct match, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded"); - finalResult = true; - // do not set excludeMatched = true here, otherwise parental path also gets excluded - } - } - pathToEvaluate ~= dirSeparator; - } - } - } - - // Is path is a subitem/sub-folder of the allowed path? 
- if (comm.length == allowedPath[offset..$].length) { - // The given path is potentially a subitem of an allowed path - // We want to capture sub-folders / files of allowed paths here, but not explicitly match other items - // if there is no wildcard - auto subItemPathCheck = allowedPath[offset..$] ~ "/"; - if (canFind(path, subItemPathCheck)) { - // The 'path' includes the allowed path, and is 'most likely' a sub-path item - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: parental path match"); - finalResult = false; - // parental path matches, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded"); - finalResult = true; - excludeMatched = true; - } - } - } - - // Does the allowed path contain a wildcard? (*) - if (canFind(allowedPath[offset..$], wildcard)) { - // allowed path contains a wildcard - // manually replace '*' for '.*' to be compatible with regex - string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*"); - auto allowedMask = regex(regexCompatiblePath); - if (matchAll(path, allowedMask)) { - // regex wildcard evaluation matches - // if we have a prior pattern match for an exclude, excludeMatched = true - if (!exclude && !excludeMatched && !exludeDirectMatch) { - // nothing triggered an exclusion before evaluation against wildcard match attempt - log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match"); - finalResult = false; - } else { - log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded"); - finalResult = true; - excludeMatched = true; - } - } - } - } - // Interim results - log.vdebug("[F]exclude = ", exclude); - log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch); - log.vdebug("[F]excludeMatched = ", excludeMatched); - - // If exclude or excludeMatched is true, then finalResult has to be true - if ((exclude) || (excludeMatched) || (exludeDirectMatch)) { - finalResult = 
true; - } - - // results - if (finalResult) { - log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED"); - } else { - log.vdebug("Evaluation against 'sync_list' final result: included for sync"); - } - return finalResult; -} - -// test if the given path is matched by the regex expression. -// recursively test up the tree. -private bool isPathMatched(string path, Regex!char mask) { - path = buildNormalizedPath(path); - auto paths = pathSplitter(path); - - string prefix = ""; - foreach(base; paths) { - prefix ~= base; - if (!path.matchFirst(mask).empty) { - // the given path matches something which we should skip - return true; - } - prefix ~= dirSeparator; - } - return false; -} - -// unit tests -unittest -{ - assert(isPathExcluded("Documents2", ["Documents"])); - assert(!isPathExcluded("Documents", ["Documents"])); - assert(!isPathExcluded("Documents/a.txt", ["Documents"])); - assert(isPathExcluded("Hello/World", ["Hello/John"])); - assert(!isPathExcluded(".", ["Documents"])); -} diff --git a/src/sqlite.d b/src/sqlite.d index 5e1839ece..97e39289d 100644 --- a/src/sqlite.d +++ b/src/sqlite.d @@ -1,27 +1,29 @@ +// What is this module called? module sqlite; + +// What does this module require to function? import std.stdio; import etc.c.sqlite3; import std.string: fromStringz, toStringz; import core.stdc.stdlib; import std.conv; -static import log; + +// What other modules that we have created do we need to import? 
+import log; extern (C) immutable(char)* sqlite3_errstr(int); // missing from the std library -static this() -{ +static this() { if (sqlite3_libversion_number() < 3006019) { throw new SqliteException("sqlite 3.6.19 or newer is required"); } } -private string ifromStringz(const(char)* cstr) -{ +private string ifromStringz(const(char)* cstr) { return fromStringz(cstr).dup; } -class SqliteException: Exception -{ +class SqliteException: Exception { @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null) { super(msg, file, line, next); @@ -33,28 +35,23 @@ class SqliteException: Exception } } -struct Database -{ +struct Database { private sqlite3* pDb; - this(const(char)[] filename) - { + this(const(char)[] filename) { open(filename); } - ~this() - { + ~this() { close(); } - int db_checkpoint() - { + int db_checkpoint() { return sqlite3_wal_checkpoint(pDb, null); } - void dump_open_statements() - { - log.log("Dumpint open statements: \n"); + void dump_open_statements() { + log.log("Dumping open statements: \n"); auto p = sqlite3_next_stmt(pDb, null); while (p != null) { log.log (" - " ~ ifromStringz(sqlite3_sql(p)) ~ "\n"); @@ -63,13 +60,12 @@ struct Database } - void open(const(char)[] filename) - { + void open(const(char)[] filename) { // https://www.sqlite.org/c3ref/open.html int rc = sqlite3_open(toStringz(filename), &pDb); if (rc == SQLITE_CANTOPEN) { // Database cannot be opened - log.error("\nThe database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3\n"); + log.error("\nThe database cannot be opened. 
Please check the permissions of " ~ filename ~ "\n"); close(); exit(-1); } @@ -81,8 +77,7 @@ struct Database sqlite3_extended_result_codes(pDb, 1); // always use extended result codes } - void exec(const(char)[] sql) - { + void exec(const(char)[] sql) { // https://www.sqlite.org/c3ref/exec.html int rc = sqlite3_exec(pDb, toStringz(sql), null, null, null); if (rc != SQLITE_OK) { @@ -93,8 +88,7 @@ struct Database } } - int getVersion() - { + int getVersion() { int userVersion; extern (C) int callback(void* user_version, int count, char** column_text, char** column_name) { import core.stdc.stdlib: atoi; @@ -108,19 +102,16 @@ struct Database return userVersion; } - string getErrorMessage() - { + string getErrorMessage() { return ifromStringz(sqlite3_errmsg(pDb)); } - void setVersion(int userVersion) - { + void setVersion(int userVersion) { import std.conv: to; exec("PRAGMA user_version=" ~ to!string(userVersion)); } - Statement prepare(const(char)[] zSql) - { + Statement prepare(const(char)[] zSql) { Statement s; // https://www.sqlite.org/c3ref/prepare.html int rc = sqlite3_prepare_v2(pDb, zSql.ptr, cast(int) zSql.length, &s.pStmt, null); @@ -130,41 +121,34 @@ struct Database return s; } - void close() - { + void close() { // https://www.sqlite.org/c3ref/close.html sqlite3_close_v2(pDb); pDb = null; } } -struct Statement -{ - struct Result - { +struct Statement { + struct Result { private sqlite3_stmt* pStmt; private const(char)[][] row; - private this(sqlite3_stmt* pStmt) - { + private this(sqlite3_stmt* pStmt) { this.pStmt = pStmt; step(); // initialize the range } - @property bool empty() - { + @property bool empty() { return row.length == 0; } - @property auto front() - { + @property auto front() { return row; } alias step popFront; - void step() - { + void step() { // https://www.sqlite.org/c3ref/step.html int rc = sqlite3_step(pStmt); if (rc == SQLITE_BUSY) { @@ -194,14 +178,12 @@ struct Statement private sqlite3_stmt* pStmt; - ~this() - { + ~this() { // 
https://www.sqlite.org/c3ref/finalize.html sqlite3_finalize(pStmt); } - void bind(int index, const(char)[] value) - { + void bind(int index, const(char)[] value) { reset(); // https://www.sqlite.org/c3ref/bind_blob.html int rc = sqlite3_bind_text(pStmt, index, value.ptr, cast(int) value.length, SQLITE_STATIC); @@ -210,47 +192,16 @@ struct Statement } } - Result exec() - { + Result exec() { reset(); return Result(pStmt); } - private void reset() - { + private void reset() { // https://www.sqlite.org/c3ref/reset.html int rc = sqlite3_reset(pStmt); if (rc != SQLITE_OK) { throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)))); } } -} - -unittest -{ - auto db = Database(":memory:"); - db.exec("CREATE TABLE test( - id TEXT PRIMARY KEY, - value TEXT - )"); - - assert(db.getVersion() == 0); - db.setVersion(1); - assert(db.getVersion() == 1); - - auto s = db.prepare("INSERT INTO test VALUES (?, ?)"); - s.bind(1, "key1"); - s.bind(2, "value"); - s.exec(); - s.bind(1, "key2"); - s.bind(2, null); - s.exec(); - - s = db.prepare("SELECT * FROM test ORDER BY id ASC"); - auto r = s.exec(); - assert(r.front[0] == "key1"); - r.popFront(); - assert(r.front[1] == null); - r.popFront(); - assert(r.empty); -} +} \ No newline at end of file diff --git a/src/sync.d b/src/sync.d index 346d8c00c..2cfae76f0 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1,3228 +1,2179 @@ +// What is this module called? +module syncEngine; + +// What does this module require to function? 
+import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; +import core.thread; +import core.time; import std.algorithm; -import std.array: array; -import std.datetime; -import std.exception: enforce; -import std.file, std.json, std.path; -import std.regex; -import std.stdio, std.string, std.uni, std.uri; +import std.array; +import std.concurrency; +import std.container.rbtree; import std.conv; +import std.datetime; import std.encoding; -import core.time, core.thread; -import core.stdc.stdlib; -import config, itemdb, onedrive, selective, upload, util; -static import log; - -// threshold after which files will be uploaded using an upload session -private long thresholdFileSize = 4 * 2^^20; // 4 MiB - -// flag to set whether local files should be deleted from OneDrive -private bool noRemoteDelete = false; - -// flag to set whether the local file should be deleted once it is successfully uploaded to OneDrive -private bool localDeleteAfterUpload = false; - -// flag to set if we are running as uploadOnly -private bool uploadOnly = false; - -// Do we configure to disable the upload validation routine -private bool disableUploadValidation = false; - -// Do we configure to disable the download validation routine -private bool disableDownloadValidation = false; - -// Do we perform a local cleanup of files that are 'extra' on the local file system, when using --download-only -private bool cleanupLocalFiles = false; - -private bool isItemFolder(const ref JSONValue item) -{ - return ("folder" in item) != null; -} - -private bool isItemFile(const ref JSONValue item) -{ - return ("file" in item) != null; -} - -private bool isItemDeleted(const ref JSONValue item) -{ - return ("deleted" in item) != null; -} - -private bool isItemRoot(const ref JSONValue item) -{ - return ("root" in item) != null; -} - -private bool isItemRemote(const ref JSONValue item) -{ - return ("remoteItem" in item) != null; -} - -private bool hasParentReference(const ref JSONValue item) -{ - return 
("parentReference" in item) != null; -} - -private bool hasParentReferenceId(const ref JSONValue item) -{ - return ("id" in item["parentReference"]) != null; -} - -private bool hasParentReferencePath(const ref JSONValue item) -{ - return ("path" in item["parentReference"]) != null; -} - -private bool isMalware(const ref JSONValue item) -{ - return ("malware" in item) != null; -} - -private bool hasFileSize(const ref JSONValue item) -{ - return ("size" in item) != null; -} +import std.exception; +import std.file; +import std.json; +import std.parallelism; +import std.path; +import std.range; +import std.regex; +import std.stdio; +import std.string; +import std.uni; +import std.uri; +import std.utf; -private bool hasId(const ref JSONValue item) -{ - return ("id" in item) != null; -} +// What other modules that we have created do we need to import? +import config; +import log; +import util; +import onedrive; +import itemdb; +import clientSideFiltering; +import progress; -private bool hasHashes(const ref JSONValue item) -{ - return ("hashes" in item["file"]) != null; +class posixException: Exception { + @safe pure this(string localTargetName, string remoteTargetName) { + string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", localTargetName, remoteTargetName); + super(msg); + } } -private bool hasQuickXorHash(const ref JSONValue item) -{ - return ("quickXorHash" in item["file"]["hashes"]) != null; +class accountDetailsException: Exception { + @safe pure this() { + string msg = format("Unable to query OneDrive API to obtain required account details"); + super(msg); + } } -private bool hasSHA256Hash(const ref JSONValue item) -{ - return ("sha256Hash" in item["file"]["hashes"]) != null; +class SyncException: Exception { + @nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__) { + super(msg, file, line); + } } -private bool 
isDotFile(const(string) path) -{ - // always allow the root - if (path == ".") return false; - auto paths = pathSplitter(buildNormalizedPath(path)); - foreach(base; paths) { - if (startsWith(base, ".")){ - return true; +class SyncEngine { + // Class Variables + ApplicationConfig appConfig; + OneDriveApi oneDriveApiInstance; + ItemDatabase itemDB; + ClientSideFiltering selectiveSync; + + // Array of directory databaseItem.id to skip while applying the changes. + // These are the 'parent path' id's that are being excluded, so if the parent id is in here, the child needs to be skipped as well + RedBlackTree!string skippedItems = redBlackTree!string(); + // Array of databaseItem.id to delete after the changes have been downloaded + string[2][] idsToDelete; + // Array of JSON items which are files or directories that are not 'root', skipped or to be deleted, that need to be processed + JSONValue[] jsonItemsToProcess; + // Array of JSON items which are files that are not 'root', skipped or to be deleted, that need to be downloaded + JSONValue[] fileJSONItemsToDownload; + // Array of paths that failed to download + string[] fileDownloadFailures; + // Array of all OneDrive driveId's that have been seen + string[] driveIDsArray; + // List of items we fake created when using --dry-run + string[2][] idsFaked; + // List of paths we fake deleted when using --dry-run + string[] pathFakeDeletedArray; + // Array of database Parent Item ID, Item ID & Local Path where the content has changed and needs to be uploaded + string[3][] databaseItemsWhereContentHasChanged; + // Array of local file paths that need to be uploaded as new items to OneDrive + string[] newLocalFilesToUploadToOneDrive; + // Array of local file paths that failed to be uploaded to OneDrive + string[] fileUploadFailures; + // List of path names changed online, but not changed locally when using --dry-run + string[] pathsRenamed; + // List of paths that were a POSIX case-insensitive match, thus could not be created 
online + string[] posixViolationPaths; + // List of local paths, that, when using the OneDrive Business Shared Folders feature, then disabling it, folder still exists locally and online + // This list of local paths needs to be skipped + string[] businessSharedFoldersOnlineToSkip; + + // Flag that there were upload or download failures listed + bool syncFailures = false; + // Is sync_list configured + bool syncListConfigured = false; + // Was --dry-run used? + bool dryRun = false; + // Was --upload-only used? + bool uploadOnly = false; + // Was --remove-source-files used? + // Flag to set whether the local file should be deleted once it is successfully uploaded to OneDrive + bool localDeleteAfterUpload = false; + + // Do we configure to disable the download validation routine due to --disable-download-validation + // We will always validate our downloads + // However, when downloading files from SharePoint, the OneDrive API will not advise the correct file size + // which means that the application thinks the file download has failed as the size is different / hash is different + // See: https://github.com/abraunegg/onedrive/discussions/1667 + bool disableDownloadValidation = false; + + // Do we configure to disable the upload validation routine due to --disable-upload-validation + // We will always validate our uploads + // However, when uploading a file that can contain metadata SharePoint will associate some + // metadata from the library the file is uploaded to directly in the file which breaks this validation. + // See: https://github.com/abraunegg/onedrive/issues/205 + // See: https://github.com/OneDrive/onedrive-api-docs/issues/935 + bool disableUploadValidation = false; + + // Do we perform a local cleanup of files that are 'extra' on the local file system, when using --download-only + bool cleanupLocalFiles = false; + // Are we performing a --single-directory sync ?
+ bool singleDirectoryScope = false; + string singleDirectoryScopeDriveId; + string singleDirectoryScopeItemId; + // Is National Cloud Deployments configured ? + bool nationalCloudDeployment = false; + // Do we configure not to perform a remote file delete if --upload-only & --no-remote-delete configured + bool noRemoteDelete = false; + // Is bypass_data_preservation set via config file + // Local data loss MAY occur in this scenario + bool bypassDataPreservation = false; + // Maximum file size upload + // https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us + // July 2020, maximum file size for all accounts is 100GB + // January 2021, maximum file size for all accounts is 250GB + ulong maxUploadFileSize = 268435456000; // 250GB + // Threshold after which files will be uploaded using an upload session + ulong sessionThresholdFileSize = 4 * 2^^20; // 4 MiB + // File size limit for file operations that the user has configured + ulong fileSizeLimit; + // Total data to upload + ulong totalDataToUpload; + // How many items have been processed for the active operation + ulong processedCount; + // Are we creating a simulated /delta response? 
This is critically important in terms of how we 'update' the database + bool generateSimulatedDeltaResponse = false; + // Store the latest DeltaLink + string latestDeltaLink; + + // Configure this class instance + this(ApplicationConfig appConfig, ItemDatabase itemDB, ClientSideFiltering selectiveSync) { + // Configure the class varaible to consume the application configuration + this.appConfig = appConfig; + // Configure the class varaible to consume the database configuration + this.itemDB = itemDB; + // Configure the class variable to consume the selective sync (skip_dir, skip_file and sync_list) configuration + this.selectiveSync = selectiveSync; + + // Configure the dryRun flag to capture if --dry-run was used + // Application startup already flagged we are also in a --dry-run state, so no need to output anything else here + this.dryRun = appConfig.getValueBool("dry_run"); + + // Configure file size limit + if (appConfig.getValueLong("skip_size") != 0) { + fileSizeLimit = appConfig.getValueLong("skip_size") * 2^^20; + fileSizeLimit = (fileSizeLimit == 0) ? ulong.max : fileSizeLimit; } - } - return false; -} - -// construct an Item struct from a JSON driveItem -private Item makeDatabaseItem(const ref JSONValue driveItem) -{ - Item item = { - id: driveItem["id"].str, - name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Biz - eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Biz - cTag: "cTag" in driveItem ? 
driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Biz) - }; - - // OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834 - // OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive - if(isItemDeleted(driveItem)){ - // Set mtime to SysTime(0) - item.mtime = SysTime(0); - } else { - // Item is not in a deleted state - // Resolve 'Key not found: fileSystemInfo' when then item is a remote item - // https://github.com/abraunegg/onedrive/issues/11 - if (isItemRemote(driveItem)) { - // remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default - // Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI - // to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to a application crash - // See: https://github.com/abraunegg/onedrive/issues/1533 - if ("fileSystemInfo" in driveItem["remoteItem"]) { - // 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases - item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str); - } else { - // is a remote item, but 'fileSystemInfo' is missing from 'remoteItem' - item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); - } - } else { - // item exists on account default drive id - item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + + // Is there a sync_list file present? 
+ if (exists(appConfig.syncListFilePath)) this.syncListConfigured = true; + + // Configure the uploadOnly flag to capture if --upload-only was used + if (appConfig.getValueBool("upload_only")) { + log.vdebug("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured"); + this.uploadOnly = true; } - } - if (isItemFile(driveItem)) { - item.type = ItemType.file; - } else if (isItemFolder(driveItem)) { - item.type = ItemType.dir; - } else if (isItemRemote(driveItem)) { - item.type = ItemType.remote; - } else { - // do not throw exception, item will be removed in applyDifferences() - } - - // root and remote items do not have parentReference - if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) { - item.driveId = driveItem["parentReference"]["driveId"].str; - if (hasParentReferenceId(driveItem)) { - item.parentId = driveItem["parentReference"]["id"].str; + // Configure the localDeleteAfterUpload flag + if (appConfig.getValueBool("remove_source_files")) { + log.vdebug("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured"); + this.localDeleteAfterUpload = true; } - } - - // extract the file hash - if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) { - // Get quickXorHash - if ("quickXorHash" in driveItem["file"]["hashes"]) { - item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str; - } else { - log.vdebug("quickXorHash is missing from ", driveItem["id"].str); + + // Configure the disableDownloadValidation flag + if (appConfig.getValueBool("disable_download_validation")) { + log.vdebug("Configuring disableDownloadValidation flag to TRUE as --disable-download-validation passed in or configured"); + this.disableDownloadValidation = true; + } + + // Configure the disableUploadValidation flag + if (appConfig.getValueBool("disable_upload_validation")) { + log.vdebug("Configuring disableUploadValidation flag to TRUE as --disable-upload-validation passed in or configured"); 
+ this.disableUploadValidation = true; + } + + // Do we configure to clean up local files if using --download-only ? + if ((appConfig.getValueBool("download_only")) && (appConfig.getValueBool("cleanup_local_files"))) { + // --download-only and --cleanup-local-files were passed in + log.log("WARNING: Application has been configured to cleanup local files that are not present online."); + log.log("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally."); + // Set the flag + this.cleanupLocalFiles = true; + } + + // Do we configure to NOT perform a remote delete if --upload-only & --no-remote-delete configured ? + if ((appConfig.getValueBool("upload_only")) && (appConfig.getValueBool("no_remote_delete"))) { + // --upload-only and --no-remote-delete were passed in + log.log("WARNING: Application has been configured NOT to cleanup remote files that are deleted locally."); + // Set the flag + this.noRemoteDelete = true; + } + + // Are we forcing to use /children scan instead of /delta to simulate National Cloud Deployment use of /children? + if (appConfig.getValueBool("force_children_scan")) { + log.log("Forcing client to use /children API call rather than /delta API to retrieve objects from the OneDrive API"); + this.nationalCloudDeployment = true; + } + + // Are we forcing the client to bypass any data preservation techniques to NOT rename any local files if there is a conflict? + // The enabling of this function could lead to data loss + if (appConfig.getValueBool("bypass_data_preservation")) { + log.log("WARNING: Application has been configured to bypass local data preservation in the event of file conflict."); + log.log("WARNING: Local data loss MAY occur in this scenario."); + this.bypassDataPreservation = true; + } + + // Did the user configure a specific rate limit for the application? 
+ if (appConfig.getValueLong("rate_limit") > 0) { + // User configured rate limit + log.log("User Configured Rate Limit: ", appConfig.getValueLong("rate_limit")); + + // If user provided rate limit is < 131072, flag that this is too low, setting to the recommended minimum of 131072 + if (appConfig.getValueLong("rate_limit") < 131072) { + // user provided limit too low + log.log("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. Overriding to recommended minimum of 131072 (128KB/s)"); + appConfig.setValueLong("rate_limit", 131072); + } } - // sha256Hash - if ("sha256Hash" in driveItem["file"]["hashes"]) { - item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str; + + // Did the user downgrade all HTTP operations to force HTTP 1.1 + if (appConfig.getValueBool("force_http_11")) { + // User is forcing downgrade to curl to use HTTP 1.1 for all operations + log.vlog("Downgrading all HTTP operations to HTTP/1.1 due to user configuration"); } else { - log.vdebug("sha256Hash is missing from ", driveItem["id"].str); + // Use curl defaults + log.vdebug("Using Curl defaults for HTTP operational protocol version (potentially HTTP/2)"); } - } - - if (isItemRemote(driveItem)) { - item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; - item.remoteId = driveItem["remoteItem"]["id"].str; - } - - // National Cloud Deployments do not support /delta as a query - // Thus we need to track in the database that this item is in sync - // As we are making an item, set the syncStatus to Y - // ONLY when using a National Cloud Deployment, all the existing DB entries will get set to N - // so when processing /children, it can be identified what the 'deleted' difference is - item.syncStatus = "Y"; - - return item; -} - -private bool testFileHash(const(string) path, const ref Item item) -{ - // Generate QuickXORHash first before others - if (item.quickXorHash) { - if (item.quickXorHash == 
computeQuickXorHash(path)) return true; - } else if (item.sha256Hash) { - if (item.sha256Hash == computeSHA256Hash(path)) return true; } - return false; -} - -class SyncException: Exception -{ - @nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__) - { - super(msg, file, line); - } -} - -final class SyncEngine -{ - private Config cfg; - private OneDriveApi onedrive; - private ItemDatabase itemdb; - private UploadSession session; - private SelectiveSync selectiveSync; - // list of items to skip while applying the changes - private string[] skippedItems; - // list of items to delete after the changes has been downloaded - private string[2][] idsToDelete; - // list of items we fake created when using --dry-run - private string[2][] idsFaked; - // list of directory names changed online, but not changed locally when using --dry-run - private string[] pathsRenamed; - // default drive id - private string defaultDriveId; - // default root id - private string defaultRootId; - // type of OneDrive account - private string accountType; - // free space remaining at init() - private long remainingFreeSpace; - // file size limit for a new file - private long newSizeLimit; - // is file malware flag - private bool malwareDetected = false; - // download filesystem issue flag - private bool downloadFailed = false; - // upload failure - OneDrive or filesystem issue (reading data) - private bool uploadFailed = false; - // initialization has been done - private bool initDone = false; - // sync engine dryRun flag - private bool dryRun = false; - // quota details available - private bool quotaAvailable = true; - // quota details restricted - private bool quotaRestricted = false; - // sync business shared folders flag - private bool syncBusinessFolders = false; - // single directory scope flag - private bool singleDirectoryScope = false; - // is sync_list configured - private bool syncListConfigured = false; - // sync_list new folder added, trigger delta 
scan override - private bool oneDriveFullScanTrigger = false; - // is bypass_data_preservation set via config file - // Local data loss MAY occur in this scenario - private bool bypassDataPreservation = false; - // is National Cloud Deployments configured - private bool nationalCloudDeployment = false; - // has performance processing timings been requested - private bool displayProcessingTime = false; - // array of all OneDrive driveId's for use with OneDrive Business Folders - private string[] driveIDsArray; - this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb, SelectiveSync selectiveSync) - { - assert(onedrive && itemdb && selectiveSync); - this.cfg = cfg; - this.onedrive = onedrive; - this.itemdb = itemdb; - this.selectiveSync = selectiveSync; - // session = UploadSession(onedrive, cfg.uploadStateFilePath); - this.dryRun = cfg.getValueBool("dry_run"); - this.newSizeLimit = cfg.getValueLong("skip_size") * 2^^20; - this.newSizeLimit = (this.newSizeLimit == 0) ? long.max : this.newSizeLimit; - } - - void reset() - { - initDone=false; - } - - void init() - { - // Set accountType, defaultDriveId, defaultRootId & remainingFreeSpace once and reuse where possible - JSONValue oneDriveDetails; - JSONValue oneDriveRootDetails; - - if (initDone) { - return; - } - - session = UploadSession(onedrive, cfg.uploadStateFilePath); + // Initialise the Sync Engine class + bool initialise() { - // Need to catch 400 or 5xx server side errors at initialization - // Get Default Drive - try { - oneDriveDetails = onedrive.getDefaultDrive(); - } catch (OneDriveException e) { - log.vdebug("oneDriveDetails = onedrive.getDefaultDrive() generated a OneDriveException"); - if (e.httpStatusCode == 400) { - // OneDrive responded with 400 error: Bad Request - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // Check this - if (cfg.getValueString("drive_id").length) { - writeln(); - log.error("ERROR: Check your 'drive_id' entry in your configuration file as it may be 
incorrect"); - writeln(); - } - // Must exit here - onedrive.shutdown(); + // create a new instance of the OneDrive API + oneDriveApiInstance = new OneDriveApi(appConfig); + if (oneDriveApiInstance.initialise()) { + try { + // Get the relevant default account & drive details + getDefaultDriveDetails(); + } catch (accountDetailsException exception) { + // details could not be queried + log.error(exception.msg); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); exit(-1); } - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling init();"); - init(); - // return back to original call - return; + + try { + // Get the relevant default account & drive details + getDefaultRootDetails(); + } catch (accountDetailsException exception) { + // details could not be queried + log.error(exception.msg); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + exit(-1); } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); + + try { + // Display details + displaySyncEngineDetails(); + } catch (accountDetailsException exception) { + // details could not be queried + log.error(exception.msg); + // Shutdown API instance + 
oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); exit(-1); } + } else { + // API could not be initialised + log.error("OneDrive API could not be initialised with previously used details"); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + exit(-1); } + log.log("Sync Engine Initialised with new Onedrive API instance"); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + return true; + } + + // Get Default Drive Details for this Account + void getDefaultDriveDetails() { - // Get Default Root + // Function variables + JSONValue defaultOneDriveDriveDetails; + + // Get Default Drive Details for this Account try { - oneDriveRootDetails = onedrive.getDefaultRoot(); - } catch (OneDriveException e) { - log.vdebug("oneDriveRootDetails = onedrive.getDefaultRoot() generated a OneDriveException"); - if (e.httpStatusCode == 400) { - // OneDrive responded with 400 error: Bad Request - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Check this - if (cfg.getValueString("drive_id").length) { - writeln(); - log.error("ERROR: Check your 'drive_id' entry in your configuration file as it may be incorrect"); - writeln(); - } - // Must exit here - onedrive.shutdown(); - exit(-1); - } - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling init();"); - init(); - // return back to original call - return; - } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + log.vdebug("Getting Account Default Drive Details"); + defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails(); + } catch (OneDriveException exception) { + log.vdebug("defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails() generated a OneDriveException"); + + string thisFunctionName = getFunctionName!({}); + + if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) { + // Handle the 400 | 401 error + handleClientUnauthorised(exception.httpStatusCode, exception.msg); } - } - - if ((oneDriveDetails.type() == JSONType.object) && (oneDriveRootDetails.type() == JSONType.object) && (hasId(oneDriveDetails)) && (hasId(oneDriveRootDetails))) { - // JSON elements are valid - // Debug OneDrive Account details response - log.vdebug("OneDrive Account Details: ", oneDriveDetails); - log.vdebug("OneDrive Account Root Details: ", oneDriveRootDetails); - - // Successfully got details from OneDrive without a server side error such as 'HTTP/1.1 500 Internal Server Error' or 'HTTP/1.1 504 Gateway Timeout' - accountType = oneDriveDetails["driveType"].str; - defaultDriveId = oneDriveDetails["id"].str; - defaultRootId = oneDriveRootDetails["id"].str; - - // get the remaining size from OneDrive API - if ("remaining" in oneDriveDetails["quota"]){ - // use the value provided - remainingFreeSpace = oneDriveDetails["quota"]["remaining"].integer; + + // HTTP request returned status code 408,429,503,504 + if 
((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(oneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) ||(exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query Account Default Drive Details - retrying applicable request in 30 seconds"); + log.vdebug("defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails() previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429 and 504 - but loop back calling this function + log.vdebug("Retrying Function: getDefaultDriveDetails()"); + getDefaultDriveDetails(); } else { - // set at zero - remainingFreeSpace = 0; + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } + } + + // If the JSON response is a correct JSON object, and has an 'id' we can set these details + if ((defaultOneDriveDriveDetails.type() == JSONType.object) && (hasId(defaultOneDriveDriveDetails))) { + log.vdebug("OneDrive Account Default Drive Details: ", defaultOneDriveDriveDetails); + appConfig.accountType = defaultOneDriveDriveDetails["driveType"].str; + appConfig.defaultDriveId = defaultOneDriveDriveDetails["id"].str; - // Make sure that defaultDriveId is in our driveIDs array to use when checking if item is in database - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, defaultDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= defaultDriveId; + // Get the initial remaining size from OneDrive API response JSON + // This will be updated as we upload data to OneDrive + if (hasQuota(defaultOneDriveDriveDetails)) { + if ("remaining" in defaultOneDriveDriveDetails["quota"]){ + // use the value provided + appConfig.remainingFreeSpace = defaultOneDriveDriveDetails["quota"]["remaining"].integer; + } } // In some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero - if (remainingFreeSpace <= 0) { + if (appConfig.remainingFreeSpace <= 0) { // free space is <= 0 .. why ? 
- if ("remaining" in oneDriveDetails["quota"]){ - // json response contained a 'remaining' value - if (accountType == "personal"){ + if ("remaining" in defaultOneDriveDriveDetails["quota"]) { + if (appConfig.accountType == "personal") { // zero space available log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online."); - quotaAvailable = false; + appConfig.quotaAvailable = false; } else { // zero space available is being reported, maybe being restricted? log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRestricted = true; + appConfig.quotaRestricted = true; } } else { // json response was missing a 'remaining' value - if (accountType == "personal"){ + if (appConfig.accountType == "personal") { log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online."); - quotaAvailable = false; + appConfig.quotaAvailable = false; } else { // quota details not available log.error("ERROR: OneDrive quota information is being restricted. 
Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRestricted = true; - } + appConfig.quotaRestricted = true; + } } } + // What did we set based on the data from the JSON + log.vdebug("appConfig.accountType = ", appConfig.accountType); + log.vdebug("appConfig.defaultDriveId = ", appConfig.defaultDriveId); + log.vdebug("appConfig.remainingFreeSpace = ", appConfig.remainingFreeSpace); + log.vdebug("appConfig.quotaAvailable = ", appConfig.quotaAvailable); + log.vdebug("appConfig.quotaRestricted = ", appConfig.quotaRestricted); - // Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes - log.vlog("Application version: ", strip(import("version"))); - log.vlog("Account Type: ", accountType); - log.vlog("Default Drive ID: ", defaultDriveId); - log.vlog("Default Root ID: ", defaultRootId); + // Make sure that appConfig.defaultDriveId is in our driveIDs array to use when checking if item is in database + // Keep the driveIDsArray with unique entries only + if (!canFind(driveIDsArray, appConfig.defaultDriveId)) { + // Add this drive id to the array to search with + driveIDsArray ~= appConfig.defaultDriveId; + } + } else { + // Handle the invalid JSON response + throw new accountDetailsException(); + } + } + + // Get Default Root Details for this Account + void getDefaultRootDetails() { + + // Function variables + JSONValue defaultOneDriveRootDetails; + + // Get Default Root Details for this Account + try { + log.vdebug("Getting Account Default Root Details"); + defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails(); + } catch (OneDriveException exception) { + log.vdebug("defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails() generated a OneDriveException"); + + string thisFunctionName = getFunctionName!({}); + + if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) { + // Handle the 400 | 401 error + 
handleClientUnauthorised(exception.httpStatusCode, exception.msg); + } - // What do we display here - if (remainingFreeSpace > 0) { - // Display the actual value - log.vlog("Remaining Free Space: ", remainingFreeSpace); - } else { - // zero or non-zero value or restricted - if (!quotaRestricted){ - log.vlog("Remaining Free Space: 0"); - } else { - log.vlog("Remaining Free Space: Not Available"); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(oneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query Account Default Root Details - retrying applicable request in 30 seconds"); + log.vdebug("defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails() previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: getDefaultRootDetails()"); + getDefaultRootDetails(); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } + } - // If account type is documentLibrary - then most likely this is a SharePoint repository - // and files 'may' be modified after upload. See: https://github.com/abraunegg/onedrive/issues/205 - if(accountType == "documentLibrary") { - // set this flag for SharePoint regardless of --disable-upload-validation being used - setDisableUploadValidation(); + // If the JSON response is a correct JSON object, and has an 'id' we can set these details + if ((defaultOneDriveRootDetails.type() == JSONType.object) && (hasId(defaultOneDriveRootDetails))) { + log.vdebug("OneDrive Account Default Root Details: ", defaultOneDriveRootDetails); + appConfig.defaultRootId = defaultOneDriveRootDetails["id"].str; + log.vdebug("appConfig.defaultRootId = ", appConfig.defaultRootId); + // Save the item to the database, so the account root drive is is always going to be present in the DB + saveItem(defaultOneDriveRootDetails); + } else { + // Handle the invalid JSON response + throw new accountDetailsException(); + } + } + + // Reset syncFailures to false + void resetSyncFailures() { + // Reset syncFailures to false if these are both empty + if (syncFailures) { + if ((fileDownloadFailures.empty) && (fileUploadFailures.empty)) { + log.log("Resetting syncFailures = false"); + syncFailures = false; + } else { + log.log("File activity array's not empty - not resetting syncFailures"); } + } + } + + // Perform a sync of the OneDrive Account + 
// - Query /delta + // - If singleDirectoryScope or nationalCloudDeployment is used we need to generate a /delta like response + // - Process changes (add, changes, moves, deletes) + // - Process any items to add (download data to local) + // - Detail any files that we failed to download + // - Process any deletes (remove local data) + void syncOneDriveAccountToLocalDisk() { + + // performFullScanTrueUp value + log.vdebug("Perform a Full Scan True-Up: ", appConfig.fullScanTrueUpRequired); + // Fetch the API response of /delta to track changes on OneDrive + fetchOneDriveDeltaAPIResponse(null, null, null); + // Process any download activities or cleanup actions + processDownloadActivities(); - // Check the local database to ensure the OneDrive Root details are in the database - checkDatabaseForOneDriveRoot(); - - // Check if there is an interrupted upload session - if (session.restore()) { - log.log("Continuing the upload session ..."); - string uploadSessionLocalFilePath = session.getUploadSessionLocalFilePath(); - auto item = session.upload(); + // If singleDirectoryScope is false, we are not targeting a single directory + // but if true, the target 'could' be a shared folder - so dont try and scan it again + if (!singleDirectoryScope) { + // OneDrive Shared Folder Handling + if (appConfig.accountType == "personal") { + // Personal Account Type + // https://github.com/OneDrive/onedrive-api-docs/issues/764 - // is 'item' a valid JSON response and not null - if (item.type() == JSONType.object) { - // Upload did not fail, JSON response contains data - // Are we in an --upload-only & --remove-source-files scenario? - // Use actual config values as we are doing an upload session recovery - if ((cfg.getValueBool("upload_only")) && (cfg.getValueBool("remove_source_files"))) { - // Log that we are deleting a local item - log.log("Removing local file as --upload-only & --remove-source-files configured"); - // are we in a --dry-run scenario? 
- if (!dryRun) { - // No --dry-run ... process local file delete - if (!uploadSessionLocalFilePath.empty) { - // only perform the delete if we have a valid file path - if (exists(uploadSessionLocalFilePath)) { - // file exists - log.vdebug("Removing local file: ", uploadSessionLocalFilePath); - safeRemove(uploadSessionLocalFilePath); - } + // Get the Remote Items from the Database + Item[] remoteItems = itemDB.selectRemoteItems(); + foreach (remoteItem; remoteItems) { + // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty + if (appConfig.getValueString("skip_dir") != "") { + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(remoteItem.name)) { + // This directory name is excluded + log.vlog("Skipping item - excluded by skip_dir config: ", remoteItem.name); + continue; + } + } + + // Directory name is not excluded or skip_dir is not populated + if (!appConfig.surpressLoggingOutput) { + log.log("Syncing this OneDrive Personal Shared Folder: ", remoteItem.name); + } + // Check this OneDrive Personal Shared Folder for changes + fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); + // Process any download activities or cleanup actions for this OneDrive Personal Shared Folder + processDownloadActivities(); + } + } else { + // Is this a Business Account with Sync Business Shared Items enabled? + if ((appConfig.accountType == "business") && ( appConfig.getValueBool("sync_business_shared_items"))) { + + // Business Account Shared Items Handling + // - OneDrive Business Shared Folder + // - OneDrive Business Shared Files ?? 
+ // - SharePoint Links + + // Get the Remote Items from the Database + Item[] remoteItems = itemDB.selectRemoteItems(); + + foreach (remoteItem; remoteItems) { + // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty + if (appConfig.getValueString("skip_dir") != "") { + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(remoteItem.name)) { + // This directory name is excluded + log.vlog("Skipping item - excluded by skip_dir config: ", remoteItem.name); + continue; } } - // as file is removed, we have nothing to add to the local database - log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured"); - } else { - // save the item - saveItem(item); + + // Directory name is not excluded or skip_dir is not populated + if (!appConfig.surpressLoggingOutput) { + log.log("Syncing this OneDrive Business Shared Folder: ", remoteItem.name); + } + + log.vdebug("Fetching /delta API response for:"); + log.vdebug(" remoteItem.remoteDriveId: ", remoteItem.remoteDriveId); + log.vdebug(" remoteItem.remoteId: ", remoteItem.remoteId); + + // Check this OneDrive Personal Shared Folder for changes + fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); + + // Process any download activities or cleanup actions for this OneDrive Personal Shared Folder + processDownloadActivities(); } - } else { - // JSON response was not valid, upload failed - log.error("ERROR: File failed to upload. 
Increase logging verbosity to determine why."); } } - initDone = true; - } else { - // init failure - initDone = false; - // log why - log.error("ERROR: Unable to query OneDrive to initialize application"); - // Debug OneDrive Account details response - log.vdebug("OneDrive Account Details: ", oneDriveDetails); - log.vdebug("OneDrive Account Root Details: ", oneDriveRootDetails); - // Must exit here - onedrive.shutdown(); - exit(-1); } } - - // Configure uploadOnly if function is called - // By default, uploadOnly = false; - void setUploadOnly() - { - uploadOnly = true; - } - - // Configure noRemoteDelete if function is called - // By default, noRemoteDelete = false; - // Meaning we will process local deletes to delete item on OneDrive - void setNoRemoteDelete() - { - noRemoteDelete = true; - } - - // Configure localDeleteAfterUpload if function is called - // By default, localDeleteAfterUpload = false; - // Meaning we will not delete any local file after upload is successful - void setLocalDeleteAfterUpload() - { - localDeleteAfterUpload = true; - } - // set the flag that we are going to sync business shared folders - void setSyncBusinessFolders() - { - syncBusinessFolders = true; - } - - // Configure singleDirectoryScope if function is called + // Configure singleDirectoryScope = true if this function is called // By default, singleDirectoryScope = false - void setSingleDirectoryScope() - { + void setSingleDirectoryScope(string normalisedSingleDirectoryPath) { + + // Function variables + Item searchItem; + JSONValue onlinePathData; + + // Set the main flag singleDirectoryScope = true; + + // What are we doing? 
+ log.log("The OneDrive Client was asked to search for this directory online and create it if it's not located: ", normalisedSingleDirectoryPath); + + // Query the OneDrive API for the specified path online + // In a --single-directory scenario, we need to travervse the entire path that we are wanting to sync + // and then check the path element does it exist online, if it does, is it a POSIX match, or if it does not, create the path + // Once we have searched online, we have the right drive id and item id so that we can downgrade the sync status, then build up + // any object items from that location + // This is because, in a --single-directory scenario, any folder in the entire path tree could be a 'case-insensitive match' + + try { + onlinePathData = queryOneDriveForSpecificPathAndCreateIfMissing(normalisedSingleDirectoryPath, true); + } catch (posixException e) { + displayPosixErrorMessage(e.msg); + log.error("ERROR: Requested directory to search for and potentially create has a 'case-insensitive match' to an existing directory on OneDrive online."); + } + + // Was a valid JSON response provided? + if (onlinePathData.type() == JSONType.object) { + // Valid JSON item was returned + searchItem = makeItem(onlinePathData); + log.vdebug("searchItem: ", searchItem); + + // Is this item a potential Shared Folder? + // Is this JSON a remote object + if (isItemRemote(onlinePathData)) { + // The path we are seeking is remote to our account drive id + searchItem.driveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str; + searchItem.id = onlinePathData["remoteItem"]["id"].str; + } + + // Set these items so that these can be used as required + singleDirectoryScopeDriveId = searchItem.driveId; + singleDirectoryScopeItemId = searchItem.id; + } else { + log.error("\nThe requested --single-directory path to sync has generated an error. 
Please correct this error and try again.\n"); + exit(EXIT_FAILURE); + } } - // Configure disableUploadValidation if function is called - // By default, disableUploadValidation = false; - // Meaning we will always validate our uploads - // However, when uploading a file that can contain metadata SharePoint will associate some - // metadata from the library the file is uploaded to directly in the file - // which breaks this validation. See https://github.com/abraunegg/onedrive/issues/205 - void setDisableUploadValidation() - { - disableUploadValidation = true; - log.vdebug("documentLibrary account type - flagging to disable upload validation checks due to Microsoft SharePoint file modification enrichments"); - } - - // Configure disableDownloadValidation if function is called - // By default, disableDownloadValidation = false; - // Meaning we will always validate our downloads - // However, when downloading files from SharePoint, the OneDrive API will not advise the correct file size - // which means that the application thinks the file download has failed as the size is different / hash is different - // See: https://github.com/abraunegg/onedrive/discussions/1667 - void setDisableDownloadValidation() - { - disableDownloadValidation = true; - log.vdebug("Flagging to disable download validation checks due to user request"); - } - - // Issue #658 Handling - // If an existing folder is moved into a sync_list valid path (where it previously was out of scope due to sync_list), - // then set this flag to true, so that on the second 'true-up' sync, we force a rescan of the OneDrive path to capture any 'files' - void setOneDriveFullScanTrigger() - { - oneDriveFullScanTrigger = true; - log.vdebug("Setting oneDriveFullScanTrigger = true due to new folder creation request in a location that is now in-scope which may have previously out of scope"); - } - - // unset method - void unsetOneDriveFullScanTrigger() - { - oneDriveFullScanTrigger = false; - log.vdebug("Setting 
oneDriveFullScanTrigger = false"); - } - - // set syncListConfigured to true - void setSyncListConfigured() - { - syncListConfigured = true; - log.vdebug("Setting syncListConfigured = true"); - } - - // set bypassDataPreservation to true - void setBypassDataPreservation() - { - bypassDataPreservation = true; - log.vdebug("Setting bypassDataPreservation = true"); - } - - // set nationalCloudDeployment to true - void setNationalCloudDeployment() - { - nationalCloudDeployment = true; - log.vdebug("Setting nationalCloudDeployment = true"); - } - - // set performance timing flag - void setPerformanceProcessingOutput() - { - displayProcessingTime = true; - log.vdebug("Setting displayProcessingTime = true"); - } - - // get performance timing flag - bool getPerformanceProcessingOutput() - { - return displayProcessingTime; - } + // Query OneDrive API for /delta changes and iterate through items online + void fetchOneDriveDeltaAPIResponse(string driveIdToQuery = null, string itemIdToQuery = null, string sharedFolderName = null) { + + string deltaLink = null; + string currentDeltaLink = null; + string deltaLinkAvailable; + JSONValue deltaChanges; + ulong responseBundleCount; + ulong jsonItemsReceived = 0; - // set cleanupLocalFiles to true - void setCleanupLocalFiles() - { - cleanupLocalFiles = true; - log.vdebug("Setting cleanupLocalFiles = true"); - } - - // return the OneDrive Account Type - auto getAccountType() - { - // return account type in use - return accountType; - } - - // download all new changes from OneDrive - void applyDifferences(bool performFullItemScan) - { - // Set defaults for the root folder - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - string driveId = defaultDriveId; - string rootId = defaultRootId; - applyDifferences(driveId, rootId, performFullItemScan); - - // Check OneDrive Personal Shared Folders - if (accountType == "personal"){ - // 
https://github.com/OneDrive/onedrive-api-docs/issues/764 - Item[] items = itemdb.selectRemoteItems(); - foreach (item; items) { - // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { - // The path that needs to be checked needs to include the '/' - // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(item.name)) { - // This directory name is excluded - log.vlog("Skipping item - excluded by skip_dir config: ", item.name); - continue; - } - } - // Directory name is not excluded or skip_dir is not populated - log.vdebug("------------------------------------------------------------------"); - if (!cfg.getValueBool("monitor")) { - log.log("Syncing this OneDrive Personal Shared Folder: ", item.name); - } else { - log.vlog("Syncing this OneDrive Personal Shared Folder: ", item.name); - } - // Check this OneDrive Personal Shared Folders - applyDifferences(item.remoteDriveId, item.remoteId, performFullItemScan); - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, item.remoteDriveId)) { - // Add this OneDrive Personal Shared Folder driveId array - driveIDsArray ~= item.remoteDriveId; - } - } + // Reset jsonItemsToProcess & processedCount + jsonItemsToProcess = []; + processedCount = 0; + + // Was a driveId provided as an input + //if (driveIdToQuery == "") { + if (strip(driveIdToQuery).empty) { + // No provided driveId to query, use the account default + log.vdebug("driveIdToQuery was empty, setting to appConfig.defaultDriveId"); + driveIdToQuery = appConfig.defaultDriveId; + log.vdebug("driveIdToQuery: ", driveIdToQuery); } - // Check OneDrive Business Shared Folders, if configured to do so - if (syncBusinessFolders){ - // query OneDrive Business Shared Folders shared with me - log.vlog("Attempting to sync OneDrive Business Shared Folders"); - JSONValue graphQuery; - try { - graphQuery = onedrive.getSharedWithMe(); - } 
catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();"); - graphQuery = onedrive.getSharedWithMe(); - } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + // Was an itemId provided as an input + //if (itemIdToQuery == "") { + if (strip(itemIdToQuery).empty) { + // No provided itemId to query, use the account default + log.vdebug("itemIdToQuery was empty, setting to appConfig.defaultRootId"); + itemIdToQuery = appConfig.defaultRootId; + log.vdebug("itemIdToQuery: ", itemIdToQuery); + } + + // What OneDrive API query do we use? + // - Are we running against a National Cloud Deployments that does not support /delta ? + // National Cloud Deployments do not support /delta as a query + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // + // - Are we performing a --single-directory sync, which will exclude many items online, focusing in on a specific online directory + // + // - Are we performing a --download-only --cleanup-local-files action? + // - If we are, and we use a normal /delta query, we get all the local 'deleted' objects as well. 
+ // - If the user deletes a folder online, then replaces it online, we download the deletion events and process the new 'upload' via the web iterface .. + // the net effect of this, is that the valid local files we want to keep, are actually deleted ...... not desirable + if ((singleDirectoryScope) || (nationalCloudDeployment) || (cleanupLocalFiles)) { + // Generate a simulated /delta response so that we correctly capture the current online state, less any 'online' delete and replace activity + generateSimulatedDeltaResponse = true; + } + + // What /delta query do we use? + if (!generateSimulatedDeltaResponse) { + // This should be the majority default pathway application use + // Get the current delta link from the database for this DriveID and RootID + deltaLinkAvailable = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); + if (!deltaLinkAvailable.empty) { + log.vdebug("Using database stored deltaLink"); + currentDeltaLink = deltaLinkAvailable; + } + + // Do we need to perform a Full Scan True Up? Is 'appConfig.fullScanTrueUpRequired' set to 'true'? 
+ if (appConfig.fullScanTrueUpRequired) { + log.log("Performing a full scan of online data to ensure consistent local state"); + log.vdebug("Setting currentDeltaLink = null"); + currentDeltaLink = null; + } + + // Dynamic output for non-verbose and verbose run so that the user knows something is being retreived from the OneDrive API + if (log.verbose <= 1) { + if (!appConfig.surpressLoggingOutput) { + log.fileOnly("Fetching items from the OneDrive API for Drive ID: ", driveIdToQuery); + // Use the dots to show the application is 'doing something' + write("Fetching items from the OneDrive API for Drive ID: ", driveIdToQuery, " ."); } + } else { + log.vdebug("Fetching /delta response from the OneDrive API for Drive ID: ", driveIdToQuery); } + + // Create a new API Instance for querying /delta and initialise it + OneDriveApi getDeltaQueryOneDriveApiInstance; + getDeltaQueryOneDriveApiInstance = new OneDriveApi(appConfig); + getDeltaQueryOneDriveApiInstance.initialise(); - if (graphQuery.type() == JSONType.object) { - string sharedFolderName; - foreach (searchResult; graphQuery["value"].array) { - // Configure additional logging items for this array element - string sharedByName; - string sharedByEmail; - // Extra details for verbose logging - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; - } + for (;;) { + responseBundleCount++; + // Get the /delta changes via the OneDrive API + // getDeltaChangesByItemId has the re-try logic for transient errors + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, currentDeltaLink, getDeltaQueryOneDriveApiInstance); + + // If the initial deltaChanges response is 
an invalid JSON object, keep trying .. + if (deltaChanges.type() != JSONType.object) { + while (deltaChanges.type() != JSONType.object) { + // Handle the invalid JSON response adn retry + log.vdebug("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response"); + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, currentDeltaLink, getDeltaQueryOneDriveApiInstance); } + } - // is the shared item with us a 'folder' ? - if (isItemFolder(searchResult)) { - // item returned is a shared folder, not a shared file - sharedFolderName = searchResult["name"].str; - // Output Shared Folder Name early - log.vdebug("Shared Folder Name: ", sharedFolderName); - // Compare this to values in business_shared_folders - if(selectiveSync.isSharedFolderMatched(sharedFolderName)){ - // Folder name matches what we are looking for - // Flags for matching - bool itemInDatabase = false; - bool itemLocalDirExists = false; - bool itemPathIsLocal = false; - - // "what if" there are 2 or more folders shared with me have the "same" name? 
- // The folder name will be the same, but driveId will be different - // This will then cause these 'shared folders' to cross populate data, which may not be desirable - log.vdebug("Shared Folder Name: MATCHED to any entry in 'business_shared_folders'"); - log.vdebug("Parent Drive Id: ", searchResult["remoteItem"]["parentReference"]["driveId"].str); - log.vdebug("Shared Item Id: ", searchResult["remoteItem"]["id"].str); - Item databaseItem; - - // for each driveid in the existing driveIDsArray - foreach (searchDriveId; driveIDsArray) { - log.vdebug("searching database for: ", searchDriveId, " ", sharedFolderName); - if (itemdb.idInLocalDatabase(searchDriveId, searchResult["remoteItem"]["id"].str)){ - // Shared folder is present - log.vdebug("Found shared folder name in database"); - itemInDatabase = true; - // Query the DB for the details of this item - itemdb.selectByPath(sharedFolderName, searchDriveId, databaseItem); - log.vdebug("databaseItem: ", databaseItem); - // Does the databaseItem.driveId == defaultDriveId? 
- if (databaseItem.driveId == defaultDriveId) { - itemPathIsLocal = true; - } - } else { - log.vdebug("Shared folder name not found in database"); - // "what if" there is 'already' a local folder with this name - // Check if in the database - // If NOT in the database, but resides on disk, this could be a new local folder created after last sync but before this one - // However we sync 'shared folders' before checking for local changes - string localpath = expandTilde(cfg.getValueString("sync_dir")) ~ "/" ~ sharedFolderName; - if (exists(localpath)) { - // local path exists - log.vdebug("Found shared folder name in local OneDrive sync_dir"); - itemLocalDirExists = true; - } - } - } - - // Shared Folder Evaluation Debugging - log.vdebug("item in database: ", itemInDatabase); - log.vdebug("path exists on disk: ", itemLocalDirExists); - log.vdebug("database drive id matches defaultDriveId: ", itemPathIsLocal); - log.vdebug("database data matches search data: ", ((databaseItem.driveId == searchResult["remoteItem"]["parentReference"]["driveId"].str) && (databaseItem.id == searchResult["remoteItem"]["id"].str))); - - if ( ((!itemInDatabase) || (!itemLocalDirExists)) || (((databaseItem.driveId == searchResult["remoteItem"]["parentReference"]["driveId"].str) && (databaseItem.id == searchResult["remoteItem"]["id"].str)) && (!itemPathIsLocal)) ) { - // This shared folder does not exist in the database - if (!cfg.getValueBool("monitor")) { - log.log("Syncing this OneDrive Business Shared Folder: ", sharedFolderName); - } else { - log.vlog("Syncing this OneDrive Business Shared Folder: ", sharedFolderName); - } - Item businessSharedFolder = makeItem(searchResult); - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared Folder - Shared By: ", 
sharedByName); - } - } - - // Do the actual sync - applyDifferences(businessSharedFolder.remoteDriveId, businessSharedFolder.remoteId, performFullItemScan); - // add this parent drive id to the array to search for, ready for next use - string newDriveID = searchResult["remoteItem"]["parentReference"]["driveId"].str; - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, newDriveID)) { - // Add this drive id to the array to search with - driveIDsArray ~= newDriveID; - } - } else { - // Shared Folder Name Conflict ... - log.log("WARNING: Skipping shared folder due to existing name conflict: ", sharedFolderName); - log.log("WARNING: Skipping changes of Path ID: ", searchResult["remoteItem"]["id"].str); - log.log("WARNING: To sync this shared folder, this shared folder needs to be renamed"); - - // Log who shared this to assist with conflict resolution - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("WARNING: Conflict Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("WARNING: Conflict Shared By: ", sharedByName); - } - } - } - } else { - log.vdebug("Shared Folder Name: NO MATCH to any entry in 'business_shared_folders'"); - } - } else { - // not a folder, is this a file? 
- if (isItemFile(searchResult)) { - // shared item is a file - string sharedFileName = searchResult["name"].str; - // log that this is not supported - log.vlog("WARNING: Not syncing this OneDrive Business Shared File: ", sharedFileName); - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared File - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared File - Shared By: ", sharedByName); - } - } - } else { - // something else entirely - log.log("WARNING: Not syncing this OneDrive Business Shared item: ", searchResult["name"].str); - } + ulong nrChanges = count(deltaChanges["value"].array); + int changeCount = 0; + + if (log.verbose <= 1) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + write("."); } + } else { + log.vdebug("API Response Bundle: ", responseBundleCount, " - Quantity of 'changes|items' in this bundle to process: ", nrChanges); } - } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); - } - } - } - - // download all new changes from a specified folder on OneDrive - void applyDifferencesSingleDirectory(const(string) path) - { - // Ensure we check the 'right' location for this directory on OneDrive - // It could come from the following places: - // 1. My OneDrive Root - // 2. My OneDrive Root as an Office 365 Shared Library - // 3. 
A OneDrive Business Shared Folder - // If 1 & 2, the configured default items are what we need - // If 3, we need to query OneDrive - - string driveId = defaultDriveId; - string rootId = defaultRootId; - string folderId; - string itemId; - JSONValue onedrivePathDetails; - - // Check OneDrive Business Shared Folders, if configured to do so - if (syncBusinessFolders){ - log.vlog("Attempting to sync OneDrive Business Shared Folders"); - // query OneDrive Business Shared Folders shared with me - JSONValue graphQuery; - try { - graphQuery = onedrive.getSharedWithMe(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); + + jsonItemsReceived = jsonItemsReceived + nrChanges; + + // We have a valid deltaChanges JSON array. This means we have at least 200+ JSON items to process. + // The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed + foreach (onedriveJSONItem; deltaChanges["value"].array) { + // increment change count for this item + changeCount++; + // Process the OneDrive object item JSON + processDeltaJSONItem(onedriveJSONItem, nrChanges, changeCount, responseBundleCount, singleDirectoryScope); } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();"); - graphQuery = onedrive.getSharedWithMe(); + + // The response may contain either @odata.deltaLink or @odata.nextLink + if ("@odata.deltaLink" in deltaChanges) { + // Log action + log.vdebug("Setting next currentDeltaLink to (@odata.deltaLink): ", deltaChanges["@odata.deltaLink"].str); + // Update currentDeltaLink + currentDeltaLink = deltaChanges["@odata.deltaLink"].str; + // Store this for later use post processing jsonItemsToProcess items + latestDeltaLink = deltaChanges["@odata.deltaLink"].str; } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + + // Update deltaLink to next changeSet bundle + if ("@odata.nextLink" in deltaChanges) { + // Log action + log.vdebug("Setting next currentDeltaLink & deltaLinkAvailable to (@odata.nextLink): ", deltaChanges["@odata.nextLink"].str); + // Update currentDeltaLink + currentDeltaLink = deltaChanges["@odata.nextLink"].str; + // Update deltaLinkAvailable to next changeSet bundle to quantify how many changes we have to process + deltaLinkAvailable = deltaChanges["@odata.nextLink"].str; + // Store this for later use post processing jsonItemsToProcess items + latestDeltaLink = deltaChanges["@odata.nextLink"].str; } + else break; } - if (graphQuery.type() == JSONType.object) { - // valid response from OneDrive - string sharedFolderName; - foreach (searchResult; graphQuery["value"].array) { - // set sharedFolderName - sharedFolderName = searchResult["name"].str; - // Configure additional logging items for this array element - string sharedByName; - string sharedByEmail; - - // Extra details for 
verbose logging - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; - } - } - - // Compare this to values in business_shared_folders - if(selectiveSync.isSharedFolderMatched(sharedFolderName)){ - // Matched sharedFolderName to item in business_shared_folders - log.vdebug("Matched sharedFolderName in business_shared_folders: ", sharedFolderName); - // But is this shared folder what we are looking for as part of --single-directory? - // User could be using 'directory' or 'directory/directory1/directory2/directory3/' - // Can we find 'sharedFolderName' in the given 'path' - if (canFind(path, sharedFolderName)) { - // Found 'sharedFolderName' in the given 'path' - log.vdebug("Matched 'sharedFolderName' in the given 'path'"); - // What was the matched folder JSON - log.vdebug("Matched sharedFolderName in business_shared_folders JSON: ", searchResult); - // Path we want to sync is on a OneDrive Business Shared Folder - // Set the correct driveId - driveId = searchResult["remoteItem"]["parentReference"]["driveId"].str; - // Set this items id - itemId = searchResult["remoteItem"]["id"].str; - log.vdebug("Updated the driveId to a new value: ", driveId); - log.vdebug("Updated the itemId to a new value: ", itemId); - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, driveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= driveId; - } - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if 
(sharedByName != "") { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName); - } - } - } - } + // To finish off the JSON processing items, this is needed to reflect this in the log + log.vdebug("------------------------------------------------------------------"); + + // Shutdown the API + getDeltaQueryOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(getDeltaQueryOneDriveApiInstance); + + // Log that we have finished querying the /delta API + if (log.verbose <= 1) { + if (!appConfig.surpressLoggingOutput) { + write("\n"); } } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); + log.vdebug("Finished processing /delta JSON response from the OneDrive API"); } - } - - // Test if the path we are going to sync from actually exists on OneDrive - log.vlog("Getting path details from OneDrive ..."); - try { - // Need to use different calls here - one call for majority, another if this is a OneDrive Business Shared Folder - if (!syncBusinessFolders){ - // Not a OneDrive Business Shared Folder - log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) with: ", driveId, ", ", path); - onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path); - } else { - // OneDrive Business Shared Folder - Use another API call using the folders correct driveId and itemId - log.vdebug("Calling onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId) with: ", driveId, ", ", itemId); - onedrivePathDetails = onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId); + + // If this was set, now unset it, as this will have been completed, so that for a true up, we dont do a double full scan + if (appConfig.fullScanTrueUpRequired) { + log.vdebug("Unsetting fullScanTrueUpRequired as this has been performed"); + appConfig.fullScanTrueUpRequired = false; } - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = 
onedrive.getPathDetails(path) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - if (syncBusinessFolders){ - // 404 was returned when trying to use a specific driveId and itemId .. which 'should' work .... but didnt - // Try the query with the path as a backup failsafe - log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) as backup with: ", driveId, ", ", path); - try { - // try calling using the path - onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path); - } catch (OneDriveException e) { - - if (e.httpStatusCode == 404) { - log.error("ERROR: The requested single directory to sync was not found on OneDrive - Check folder permissions and sharing status with folder owner"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferencesSingleDirectory(path);"); - applyDifferencesSingleDirectory(path); - // return back to original call - return; - } - - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - } else { - // Not a OneDrive Business Shared folder operation - log.error("ERROR: The requested single directory to sync was not found on OneDrive"); - return; - } + } else { + // Why are are generating a /delta response + log.vdebug("Why are we generating a /delta response:"); + log.vdebug(" singleDirectoryScope: ", singleDirectoryScope); + log.vdebug(" nationalCloudDeployment: ", nationalCloudDeployment); + log.vdebug(" cleanupLocalFiles: ", 
cleanupLocalFiles); + + // What 'path' are we going to start generating the response for + string pathToQuery; + + // If --single-directory has been called, use the value that has been set + if (singleDirectoryScope) { + pathToQuery = appConfig.getValueString("single_directory"); } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferencesSingleDirectory(path);"); - applyDifferencesSingleDirectory(path); - // return back to original call - return; + // We could also be syncing a Shared Folder of some description + if (!sharedFolderName.empty) { + pathToQuery = sharedFolderName; } - - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + + // Generate the simulated /delta response + // + // The generated /delta response however contains zero deleted JSON items, so the only way that we can track this, is if the object was in sync + // we have the object in the database, thus, what we need to do is for every DB object in the tree of items, flag 'syncStatus' as 'N', then when we process + // the returned JSON items from the API, we flag the item as back in sync, then we can cleanup any out-of-sync items + // + // The flagging of the local database items to 'N' is handled within the generateDeltaResponse() function + // + // When these JSON items are then processed, if the item exists online, and is in the DB, and that the values match, the DB item is flipped back to 'Y' + // This then allows the application to look for any remaining 'N' values, and 
delete these as no longer needed locally + deltaChanges = generateDeltaResponse(pathToQuery); + + ulong nrChanges = count(deltaChanges["value"].array); + int changeCount = 0; + log.vdebug("API Response Bundle: ", responseBundleCount, " - Quantity of 'changes|items' in this bundle to process: ", nrChanges); + jsonItemsReceived = jsonItemsReceived + nrChanges; + + // The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed + foreach (onedriveJSONItem; deltaChanges["value"].array) { + // increment change count for this item + changeCount++; + // Process the OneDrive object item JSON + processDeltaJSONItem(onedriveJSONItem, nrChanges, changeCount, responseBundleCount, singleDirectoryScope); + } + + // To finish off the JSON processing items, this is needed to reflect this in the log + log.vdebug("------------------------------------------------------------------"); + + // Log that we have finished generating our self generated /delta response + if (!appConfig.surpressLoggingOutput) { + log.log("Finished processing self generated /delta JSON response from the OneDrive API"); } } - // OK - the path on OneDrive should exist, get the driveId and rootId for this folder - // Was the response a valid JSON Object? - if (onedrivePathDetails.type() == JSONType.object) { - // OneDrive Personal Shared Folder handling - // Is this item a remote item? - if(isItemRemote(onedrivePathDetails)){ - // 2 step approach: - // 1. Ensure changes for the root remote path are captured - // 2. 
Download changes specific to the remote path - - // root remote - applyDifferences(defaultDriveId, onedrivePathDetails["id"].str, false); + // Cleanup deltaChanges as this is no longer needed + object.destroy(deltaChanges); + + // We have JSON items received from the OneDrive API + log.vdebug("Number of JSON Objects received from OneDrive API: ", jsonItemsReceived); + log.vdebug("Number of JSON Objects already processed (root and deleted items): ", (jsonItemsReceived - jsonItemsToProcess.length)); + + // We should have now at least processed all the JSON items as returned by the /delta call + // Additionally, we should have a new array, that now contains all the JSON items we need to process that are non 'root' or deleted items + log.vdebug("Number of JSON items to process is: ", jsonItemsToProcess.length); + + // Are there items to process? + if (jsonItemsToProcess.length > 0) { + // Lets deal with the JSON items in a batch process + ulong batchSize = 500; + ulong batchCount = (jsonItemsToProcess.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; + + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + write("Processing ", jsonItemsToProcess.length, " applicable changes and items received from Microsoft OneDrive "); + log.fileOnly("Processing ", jsonItemsToProcess.length, " applicable changes and items received from Microsoft OneDrive"); + if (log.verbose != 0) { + // close out the write() processing line above + writeln(); + } + } - // remote changes - driveId = onedrivePathDetails["remoteItem"]["parentReference"]["driveId"].str; // Should give something like 66d53be8a5056eca - folderId = onedrivePathDetails["remoteItem"]["id"].str; // Should give something like BC7D88EC1F539DCF!107 + // For each batch, process the JSON items that need to be now processed. 
+ // 'root' and deleted objects have already been handled + foreach (batchOfJSONItems; jsonItemsToProcess.chunks(batchSize)) { + // Chunk the total items to process into 500 lot items + batchesProcessed++; - // Apply any differences found on OneDrive for this path (download data) - applyDifferences(driveId, folderId, false); - } else { - // use the item id as folderId - folderId = onedrivePathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101 - // Apply any differences found on OneDrive for this path (download data) - // Use driveId rather than defaultDriveId as this will be updated if path was matched to another parent driveId - applyDifferences(driveId, folderId, false); + if (log.verbose == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + write("."); + } + } else { + log.vlog("Processing OneDrive JSON item batch [", batchesProcessed,"/", batchCount, "] to ensure consistent local state"); + } + + // Process the batch + processJSONItemsInBatch(batchOfJSONItems, batchesProcessed, batchCount); + + // To finish off the JSON processing items, this is needed to reflect this in the log + log.vdebug("------------------------------------------------------------------"); } - } else { - // Log that an invalid JSON object was returned - log.vdebug("onedrive.getPathDetails call returned an invalid JSON Object"); - } - } - - // make sure the OneDrive root is in our database - auto checkDatabaseForOneDriveRoot() - { - log.vlog("Fetching details for OneDrive Root"); - JSONValue rootPathDetails = onedrive.getDefaultRoot(); // Returns a JSON Value - - // validate object is a JSON value - if (rootPathDetails.type() == JSONType.object) { - // valid JSON object - Item rootPathItem = makeItem(rootPathDetails); - // configure driveId and rootId for the OneDrive Root - // Set defaults for the root folder - string driveId = rootPathDetails["parentReference"]["driveId"].str; // 
Should give something like 12345abcde1234a1 - string rootId = rootPathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101 - - // Query the database - if (!itemdb.selectById(driveId, rootId, rootPathItem)) { - log.vlog("OneDrive Root does not exist in the database. We need to add it."); - applyDifference(rootPathDetails, driveId, true); - log.vlog("Added OneDrive Root to the local database"); - } else { - log.vlog("OneDrive Root exists in the database"); + + if (log.verbose == 0) { + // close off '.' output + if (!appConfig.surpressLoggingOutput) { + writeln(); + } } + + // Free up memory and items processed as it is pointless now having this data around + jsonItemsToProcess = []; + + // Debug output - what was processed + log.vdebug("Number of JSON items to process is: ", jsonItemsToProcess.length); + log.vdebug("Number of JSON items processed was: ", processedCount); } else { - // Log that an invalid JSON object was returned - log.error("ERROR: Unable to query OneDrive for account details"); - log.vdebug("onedrive.getDefaultRoot call returned an invalid JSON Object"); - // Must exit here as we cant configure our required variables - onedrive.shutdown(); - exit(-1); + if (!appConfig.surpressLoggingOutput) { + log.log("No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive"); + } } + + // Update the deltaLink in the database so that we can reuse this now that jsonItemsToProcess has been processed + if (!latestDeltaLink.empty) { + log.vdebug("Updating completed deltaLink in DB to: ", latestDeltaLink); + itemDB.setDeltaLink(driveIdToQuery, itemIdToQuery, latestDeltaLink); + } + + // Keep the driveIDsArray with unique entries only + if (!canFind(driveIDsArray, driveIdToQuery)) { + // Add this driveId to the array of driveId's we know about + driveIDsArray ~= driveIdToQuery; + } } - // create a directory on OneDrive without syncing - auto createDirectoryNoSync(const(string) path) - 
{ - // Attempt to create the requested path within OneDrive without performing a sync - log.vlog("Attempting to create the requested path within OneDrive"); - - // Handle the remote folder creation and updating of the local database without performing a sync - uploadCreateDir(path); - } - - // delete a directory on OneDrive without syncing - auto deleteDirectoryNoSync(const(string) path) - { - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - const(char)[] rootId = defaultRootId; + // Process the /delta API JSON response items + void processDeltaJSONItem(JSONValue onedriveJSONItem, ulong nrChanges, int changeCount, ulong responseBundleCount, bool singleDirectoryScope) { - // Attempt to delete the requested path within OneDrive without performing a sync - log.vlog("Attempting to delete the requested path within OneDrive"); + // Variables for this foreach loop + string thisItemId; + bool itemIsRoot = false; + bool handleItemAsRootObject = false; + bool itemIsDeletedOnline = false; + bool itemHasParentReferenceId = false; + bool itemHasParentReferencePath = false; + bool itemIdMatchesDefaultRootId = false; + bool itemNameExplicitMatchRoot = false; + string objectParentDriveId; - // test if the path we are going to exists on OneDrive - try { - onedrive.getPathDetails(path); - } catch (OneDriveException e) { - log.vdebug("onedrive.getPathDetails(path) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found on OneDrive - no need to delete it - log.vlog("The requested directory to delete was not found on OneDrive - skipping removing the remote directory as it doesn't exist"); - return; - } + log.vdebug("------------------------------------------------------------------"); + log.vdebug("Processing OneDrive Item ", changeCount, " of ", nrChanges, " from API Response Bundle ", responseBundleCount); + log.vdebug("Raw JSON OneDrive Item: ", onedriveJSONItem); + // What is this 
item's id + thisItemId = onedriveJSONItem["id"].str; + // Is this a deleted item - only calculate this once + itemIsDeletedOnline = isItemDeleted(onedriveJSONItem); + + if(!itemIsDeletedOnline){ + // This is not a deleted item + log.vdebug("This item is not a OneDrive deletion change"); + // Only calculate this once + itemIsRoot = isItemRoot(onedriveJSONItem); + itemHasParentReferenceId = hasParentReferenceId(onedriveJSONItem); + itemIdMatchesDefaultRootId = (thisItemId == appConfig.defaultRootId); + itemNameExplicitMatchRoot = (onedriveJSONItem["name"].str == "root"); + objectParentDriveId = onedriveJSONItem["parentReference"]["driveId"].str; - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling deleteDirectoryNoSync(path);"); - deleteDirectoryNoSync(path); - // return back to original call - return; - } + // Shared Folder Items + // !hasParentReferenceId(id) + // !hasParentReferenceId(path) - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // Test is this is the OneDrive Users Root? 
+ // Debug output of change evaluation items + log.vdebug("defaultRootId = ", appConfig.defaultRootId); + log.vdebug("'search id' = ", thisItemId); + log.vdebug("id == defaultRootId = ", itemIdMatchesDefaultRootId); + log.vdebug("isItemRoot(onedriveJSONItem) = ", itemIsRoot); + log.vdebug("onedriveJSONItem['name'].str == 'root' = ", itemNameExplicitMatchRoot); + log.vdebug("itemHasParentReferenceId = ", itemHasParentReferenceId); + + if ( (itemIdMatchesDefaultRootId || singleDirectoryScope) && itemIsRoot && itemNameExplicitMatchRoot) { + // This IS a OneDrive Root item or should be classified as such in the case of 'singleDirectoryScope' + log.vdebug("JSON item will flagged as a 'root' item"); + handleItemAsRootObject = true; } } - Item item; - // Need to check all driveid's we know about, not just the defaultDriveId - bool itemInDB = false; - foreach (searchDriveId; driveIDsArray) { - if (itemdb.selectByPath(path, searchDriveId, item)) { - // item was found in the DB - itemInDB = true; - break; - } - } - // Was the item found in the DB - if (!itemInDB) { - // this is odd .. this directory is not in the local database - just go delete it - log.vlog("The requested directory to delete was not found in the local database - pushing delete request direct to OneDrive"); - uploadDeleteItem(item, path); + // How do we handle this JSON item from the OneDrive API? 
+ // Is this a confirmed 'root' item, has no Parent ID, or is a Deleted Item + if (handleItemAsRootObject || !itemHasParentReferenceId || itemIsDeletedOnline){ + // Is a root item, has no id in parentReference or is a OneDrive deleted item + log.vdebug("objectParentDriveId = ", objectParentDriveId); + log.vdebug("handleItemAsRootObject = ", handleItemAsRootObject); + log.vdebug("itemHasParentReferenceId = ", itemHasParentReferenceId); + log.vdebug("itemIsDeletedOnline = ", itemIsDeletedOnline); + log.vdebug("Handling change immediately as 'root item', or has no parent reference id or is a deleted item"); + // OK ... do something with this JSON post here .... + processRootAndDeletedJSONItems(onedriveJSONItem, objectParentDriveId, handleItemAsRootObject, itemIsDeletedOnline, itemHasParentReferenceId); } else { - // the folder was in the local database - // Handle the deletion and saving any update to the local database - log.vlog("The requested directory to delete was found in the local database. Processing the deletion normally"); - deleteByPath(path); - } - } - - // rename a directory on OneDrive without syncing - auto renameDirectoryNoSync(string source, string destination) - { - try { - // test if the local path exists on OneDrive - onedrive.getPathDetails(source); - } catch (OneDriveException e) { - log.vdebug("onedrive.getPathDetails(source); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - log.vlog("The requested directory to rename was not found on OneDrive"); - return; + // Do we need to update this RAW JSON from OneDrive? + if ( (objectParentDriveId != appConfig.defaultDriveId) && (appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items")) ) { + // Potentially need to update this JSON data + log.vdebug("Potentially need to update this source JSON .... 
need to check the database"); + + // Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id + Item remoteDBItem; + itemDB.selectByRemoteId(objectParentDriveId, thisItemId, remoteDBItem); + + // Is the data that was returned from the database what we are looking for? + if ((remoteDBItem.remoteDriveId == objectParentDriveId) && (remoteDBItem.remoteId == thisItemId)) { + // Yes, this is the record we are looking for + log.vdebug("DB Item response for remoteDBItem: ", remoteDBItem); + + // Must compare remoteDBItem.name with remoteItem.name + if (remoteDBItem.name != onedriveJSONItem["name"].str) { + // Update JSON Item + string actualOnlineName = onedriveJSONItem["name"].str; + log.vdebug("Updating source JSON 'name' to that which is the actual local directory"); + log.vdebug("onedriveJSONItem['name'] was: ", onedriveJSONItem["name"].str); + log.vdebug("Updating onedriveJSONItem['name'] to: ", remoteDBItem.name); + onedriveJSONItem["name"] = remoteDBItem.name; + log.vdebug("onedriveJSONItem['name'] now: ", onedriveJSONItem["name"].str); + // Add the original name to the JSON + onedriveJSONItem["actualOnlineName"] = actualOnlineName; + } + } } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling renameDirectoryNoSync(source, destination);"); - renameDirectoryNoSync(source, destination); - // return back to original call - return; + + // If we are not self-generating a /delta response, check this initial /delta JSON bundle item against the basic checks + // of applicability against 'skip_file', 'skip_dir' and 'sync_list' + // We only do this if we did not generate a /delta response, as generateDeltaResponse() performs the checkJSONAgainstClientSideFiltering() + // against elements as it is building the /delta compatible response + // If we blindly just 'check again' all JSON responses then there is potentially double JSON processing going on if we used generateDeltaResponse() + bool discardDeltaJSONItem = false; + if (!generateSimulatedDeltaResponse) { + // Check applicability against 'skip_file', 'skip_dir' and 'sync_list' + discardDeltaJSONItem = checkJSONAgainstClientSideFiltering(onedriveJSONItem); } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // Add this JSON item for further processing if this is not being discarded + if (!discardDeltaJSONItem) { + log.vdebug("Adding this Raw JSON OneDrive Item to jsonItemsToProcess array for further processing"); + jsonItemsToProcess ~= onedriveJSONItem; } } - // The OneDrive API returned a 200 OK status, so the folder exists - // Rename the requested directory on OneDrive without performing a sync - moveByPath(source, destination); } - // download the new changes of a specific item - // id is the root of the drive or a shared folder - private void applyDifferences(string driveId, const(char)[] id, bool performFullItemScan) - { - log.vlog("Applying 
changes of Path ID: " ~ id); - // function variables - char[] idToQuery; - JSONValue changes; - JSONValue changesAvailable; - JSONValue idDetails; - JSONValue currentDriveQuota; - string syncFolderName; - string syncFolderPath; - string syncFolderChildPath; - string deltaLink; - string deltaLinkAvailable; - bool nationalCloudChildrenScan = false; + // Process 'root' and 'deleted' OneDrive JSON items + void processRootAndDeletedJSONItems(JSONValue onedriveJSONItem, string driveId, bool handleItemAsRootObject, bool itemIsDeletedOnline, bool itemHasParentReferenceId) { - // Tracking processing performance - SysTime startFunctionProcessingTime; - SysTime endFunctionProcessingTime; - SysTime startBundleProcessingTime; - SysTime endBundleProcessingTime; - ulong cumulativeOneDriveItemCount = 0; + // Use the JSON elements rather can computing a DB struct via makeItem() + string thisItemId = onedriveJSONItem["id"].str; + string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + + // Check if the item has been seen before + Item existingDatabaseItem; + bool existingDBEntry = itemDB.selectById(thisItemDriveId, thisItemId, existingDatabaseItem); - if (displayProcessingTime) { - writeln("============================================================"); - writeln("Querying OneDrive API for relevant 'changes|items' stored online for this account"); - startFunctionProcessingTime = Clock.currTime(); - writeln("Start Function Processing Time: ", startFunctionProcessingTime); - } - - // Update the quota details for this driveId, as this could have changed since we started the application - the user could have added / deleted data online, or purchased additional storage - // Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders - try { - currentDriveQuota = onedrive.getDriveQuota(driveId); - } catch (OneDriveException e) { - log.vdebug("currentDriveQuota = onedrive.getDriveQuota(driveId) 
generated a OneDriveException"); - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferences(driveId, id, performFullItemScan);"); - applyDifferences(driveId, id, performFullItemScan); - // return back to original call - return; + // Is the item deleted online? + if(!itemIsDeletedOnline) { + + // Is the item a confirmed root object? + + // The JSON item should be considered a 'root' item if: + // 1. Contains a ["root"] element + // 2. Has no ["parentReference"]["id"] ... #323 & #324 highlighted that this is false as some 'root' shared objects now can have an 'id' element .. OneDrive API change + // 2. Has no ["parentReference"]["path"] + // 3. 
Was detected by an input flag as to be handled as a root item regardless of actual status + + if ((handleItemAsRootObject) || (!itemHasParentReferenceId)) { + log.vdebug("Handing JSON object as OneDrive 'root' object"); + if (!existingDBEntry) { + // we have not seen this item before + saveItem(onedriveJSONItem); + } } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + } else { + // Change is to delete an item + log.vdebug("Handing a OneDrive Deleted Item"); + if (existingDBEntry) { + // Flag to delete + log.vdebug("Flagging to delete item locally: ", onedriveJSONItem); + idsToDelete ~= [thisItemDriveId, thisItemId]; + } else { + // Flag to ignore + log.vdebug("Flagging item to skip: ", onedriveJSONItem); + skippedItems.insert(thisItemId); } } - - // validate that currentDriveQuota is a JSON value - if (currentDriveQuota.type() == JSONType.object) { - // Response from API contains valid data - // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - // If 'business' accounts, if driveId == defaultDriveId, then we will have data - // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be 0 values - if ("quota" in currentDriveQuota){ - if (driveId == defaultDriveId) { - // We potentially have updated quota remaining details available - // However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero - if ("remaining" in currentDriveQuota["quota"]){ - // We have valid quota details returned for the drive id - remainingFreeSpace = currentDriveQuota["quota"]["remaining"].integer; - if (remainingFreeSpace <= 0) { - if (accountType == "personal"){ - // zero space available - log.error("ERROR: OneDrive account currently has zero space available. 
Please free up some space online."); - quotaAvailable = false; - } else { - // zero space available is being reported, maybe being restricted? - log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRestricted = true; - } - } else { - // Display the updated value - log.vlog("Updated Remaining Free Space: ", remainingFreeSpace); - } + } + + // Process each of the elements contained in jsonItemsToProcess[] + void processJSONItemsInBatch(JSONValue[] array, ulong batchGroup, ulong batchCount) { + + ulong batchElementCount = array.length; + + foreach (i, onedriveJSONItem; array.enumerate) { + // Use the JSON elements rather can computing a DB struct via makeItem() + ulong elementCount = i +1; + + // To show this is the processing for this particular item, start off with this breaker line + log.vdebug("------------------------------------------------------------------"); + log.vdebug("Processing OneDrive JSON item ", elementCount, " of ", batchElementCount, " as part of JSON Item Batch ", batchGroup, " of ", batchCount); + log.vdebug("Raw JSON OneDrive Item: ", onedriveJSONItem); + + string thisItemId = onedriveJSONItem["id"].str; + string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str; + string thisItemName = onedriveJSONItem["name"].str; + + // Create an empty item struct for an existing DB item + Item existingDatabaseItem; + + // Do we NOT want this item? 
+ bool unwanted = false; // meaning by default we will WANT this item + // Is this parent is in the database + bool parentInDatabase = false; + // What is the path of the new item + string newItemPath; + + // Configure the remoteItem - so if it is used, it can be utilised later + Item remoteItem; + + // Check the database for an existing entry for this JSON item + bool existingDBEntry = itemDB.selectById(thisItemDriveId, thisItemId, existingDatabaseItem); + + // Calculate if the Parent Item is in the database so that it can be re-used + parentInDatabase = itemDB.idInLocalDatabase(thisItemDriveId, thisItemParentId); + + // Calculate the path of this JSON item, but we can only do this if the parent is in the database + if (parentInDatabase) { + // Calculate this items path + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + log.vdebug("New Item calculated full path is: ", newItemPath); + } else { + // Parent not in the database + // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? + log.vdebug("Parent ID is not in DB .. "); + // Why? + if (thisItemDriveId == appConfig.defaultDriveId) { + // Flagging as unwanted + log.vdebug("Flagging as unwanted: thisItemDriveId (", thisItemDriveId,"), thisItemParentId (", thisItemParentId,") not in local database"); + if (thisItemParentId in skippedItems) { + log.vdebug("Reason: thisItemParentId listed within skippedItems"); } + unwanted = true; } else { - // quota details returned, but for a drive id that is not ours - if ("remaining" in currentDriveQuota["quota"]){ - // remaining is in the quota JSON response - if (currentDriveQuota["quota"]["remaining"].integer <= 0) { - // value returned is 0 or less than 0 - log.vlog("OneDrive quota information is set at zero, as this is not our drive id, ignoring"); + // Edge case as the parent (from another users OneDrive account) will never be in the database - potentially a shared object? 
+ log.vdebug("Potential Shared Object Item: ", onedriveJSONItem); + // Format the OneDrive change into a consumable object for the database + remoteItem = makeItem(onedriveJSONItem); + log.vdebug("The reported parentId is not in the database. This potentially is a shared folder as 'remoteItem.driveId' != 'appConfig.defaultDriveId'. Relevant Details: remoteItem.driveId (", remoteItem.driveId,"), remoteItem.parentId (", remoteItem.parentId,")"); + + if (appConfig.accountType == "personal") { + // Personal Account Handling + // Ensure that this item has no parent + log.vdebug("Setting remoteItem.parentId to be null"); + remoteItem.parentId = null; + // Add this record to the local database + log.vdebug("Update/Insert local database with remoteItem details with remoteItem.parentId as null: ", remoteItem); + itemDB.upsert(remoteItem); + } else { + // Business or SharePoint Account Handling + log.vdebug("Handling a Business or SharePoint Shared Item JSON object"); + + if (appConfig.accountType == "business") { + // Create a DB Tie Record for this parent object + Item parentItem; + parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; + parentItem.id = onedriveJSONItem["parentReference"]["id"].str; + parentItem.name = "root"; + parentItem.type = ItemType.dir; + parentItem.mtime = remoteItem.mtime; + parentItem.parentId = null; + + // Add this parent record to the local database + log.vdebug("Insert local database with remoteItem parent details: ", parentItem); + itemDB.upsert(parentItem); + + // Ensure that this item has no parent + log.vdebug("Setting remoteItem.parentId to be null"); + remoteItem.parentId = null; + + // Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id + Item remoteDBItem; + itemDB.selectByRemoteId(remoteItem.driveId, remoteItem.id, remoteDBItem); + + // Must compare remoteDBItem.name with remoteItem.name + if ((!remoteDBItem.name.empty) && 
(remoteDBItem.name != remoteItem.name)) { + // Update DB Item + log.vdebug("The shared item stored in OneDrive, has a different name to the actual name on the remote drive"); + log.vdebug("Updating remoteItem.name JSON data with the actual name being used on account drive and local folder"); + log.vdebug("remoteItem.name was: ", remoteItem.name); + log.vdebug("Updating remoteItem.name to: ", remoteDBItem.name); + remoteItem.name = remoteDBItem.name; + log.vdebug("Setting remoteItem.remoteName to: ", onedriveJSONItem["name"].str); + + // Update JSON Item + remoteItem.remoteName = onedriveJSONItem["name"].str; + log.vdebug("Updating source JSON 'name' to that which is the actual local directory"); + log.vdebug("onedriveJSONItem['name'] was: ", onedriveJSONItem["name"].str); + log.vdebug("Updating onedriveJSONItem['name'] to: ", remoteDBItem.name); + onedriveJSONItem["name"] = remoteDBItem.name; + log.vdebug("onedriveJSONItem['name'] now: ", onedriveJSONItem["name"].str); + + // Update newItemPath value + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ remoteDBItem.name; + log.vdebug("New Item updated calculated full path is: ", newItemPath); + } + + // Add this record to the local database + log.vdebug("Update/Insert local database with remoteItem details: ", remoteItem); + itemDB.upsert(remoteItem); } } } - } else { - // No quota details returned - if (driveId == defaultDriveId) { - // no quota details returned for current drive id - log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. 
Please free up some space online."); - } else { - // quota details not available - log.vdebug("OneDrive quota information is being restricted as this is not our drive id."); - } - } - } - - // Query OneDrive API for the name of this folder id - try { - idDetails = onedrive.getPathDetailsById(driveId, id); - } catch (OneDriveException e) { - log.vdebug("idDetails = onedrive.getPathDetailsById(driveId, id) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // id was not found - possibly a remote (shared) folder - log.vlog("No details returned for given Path ID"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferences(driveId, id, performFullItemScan);"); - applyDifferences(driveId, id, performFullItemScan); - // return back to original call - return; } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - - // validate that idDetails is a JSON value - if (idDetails.type() == JSONType.object) { - // Get the name of this 'Path ID' - if (("id" in idDetails) != null) { - // valid response from onedrive.getPathDetailsById(driveId, id) - a JSON item object present - if ((idDetails["id"].str == id) && (!isItemFile(idDetails))){ - // Is a Folder or Remote Folder - syncFolderName = idDetails["name"].str; - } - - // Debug output of path details as queried from OneDrive - log.vdebug("OneDrive Path Details: ", idDetails); + // Check the skippedItems array for the parent id of this JSONItem if this is something we 
need to skip + if (!unwanted) { + if (thisItemParentId in skippedItems) { + // Flag this JSON item as unwanted + log.vdebug("Flagging as unwanted: find(thisItemParentId).length != 0"); + unwanted = true; + + // Is this item id in the database? + if (existingDBEntry) { + // item exists in database, most likely moved out of scope for current client configuration + log.vdebug("This item was previously synced / seen by the client"); + if (("name" in onedriveJSONItem["parentReference"]) != null) { - // OneDrive Personal Folder Item Reference (24/4/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('66d53be8a5056eca')/items/$entity", - // "cTag": "adDo2NkQ1M0JFOEE1MDU2RUNBITEwMS42MzY5MTY5NjQ1ODcwNzAwMDA", - // "eTag": "aNjZENTNCRThBNTA1NkVDQSExMDEuMQ", - // "fileSystemInfo": { - // "createdDateTime": "2018-06-06T20:45:24.436Z", - // "lastModifiedDateTime": "2019-04-24T07:09:31.29Z" - // }, - // "folder": { - // "childCount": 3, - // "view": { - // "sortBy": "takenOrCreatedDateTime", - // "sortOrder": "ascending", - // "viewType": "thumbnails" - // } - // }, - // "id": "66D53BE8A5056ECA!101", - // "name": "root", - // "parentReference": { - // "driveId": "66d53be8a5056eca", - // "driveType": "personal" - // }, - // "root": {}, - // "size": 0 - - // OneDrive Personal Remote / Shared Folder Item Reference (4/9/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('driveId')/items/$entity", - // "cTag": "cTag", - // "eTag": "eTag", - // "id": "itemId", - // "name": "shared", - // "parentReference": { - // "driveId": "driveId", - // "driveType": "personal", - // "id": "parentItemId", - // "path": "/drive/root:" - // }, - // "remoteItem": { - // "fileSystemInfo": { - // "createdDateTime": "2019-01-14T18:54:43.2666667Z", - // "lastModifiedDateTime": "2019-04-24T03:47:22.53Z" - // }, - // "folder": { - // "childCount": 0, - // "view": { - // "sortBy": "takenOrCreatedDateTime", - // "sortOrder": "ascending", - // "viewType": 
"thumbnails" - // } - // }, - // "id": "remoteItemId", - // "parentReference": { - // "driveId": "remoteDriveId", - // "driveType": "personal" - // "id": "id", - // "name": "name", - // "path": "/drives//items/:/" - // }, - // "size": 0, - // "webUrl": "webUrl" - // } - - // OneDrive Business Folder & Shared Folder Item Reference (24/4/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('driveId')/items/$entity", - // "@odata.etag": "\"{eTag},1\"", - // "cTag": "\"c:{cTag},0\"", - // "eTag": "\"{eTag},1\"", - // "fileSystemInfo": { - // "createdDateTime": "2019-04-17T04:00:43Z", - // "lastModifiedDateTime": "2019-04-17T04:00:43Z" - // }, - // "folder": { - // "childCount": 2 - // }, - // "id": "itemId", - // "name": "shared_folder", - // "parentReference": { - // "driveId": "parentDriveId", - // "driveType": "business", - // "id": "parentId", - // "path": "/drives/driveId/root:" - // }, - // "size": 0 - - // To evaluate a change received from OneDrive, this must be set correctly - if (hasParentReferencePath(idDetails)) { - // Path from OneDrive has a parentReference we can use - log.vdebug("Item details returned contains parent reference path - potentially shared folder object"); - syncFolderPath = idDetails["parentReference"]["path"].str; - syncFolderChildPath = syncFolderPath ~ "/" ~ idDetails["name"].str ~ "/"; - } else { - // No parentReference, set these to blank - log.vdebug("Item details returned no parent reference path"); - syncFolderPath = ""; - syncFolderChildPath = ""; + // How is this out of scope? 
+ // is sync_list configured + if (syncListConfigured) { + // sync_list configured and in use + if (selectiveSync.isPathExcludedViaSyncList(onedriveJSONItem["parentReference"]["name"].str)) { + // Previously synced item is now out of scope as it has been moved out of what is included in sync_list + log.vdebug("This previously synced item is now excluded from being synced due to sync_list exclusion"); + } + } + // flag to delete local file as it now is no longer in sync with OneDrive + log.vdebug("Flagging to delete item locally: ", onedriveJSONItem); + idsToDelete ~= [thisItemDriveId, thisItemId]; + } + } } - - // Debug Output - log.vdebug("Sync Folder Name: ", syncFolderName); - log.vdebug("Sync Folder Parent Path: ", syncFolderPath); - log.vdebug("Sync Folder Child Path: ", syncFolderChildPath); - } - } else { - // Log that an invalid JSON object was returned - log.vdebug("onedrive.getPathDetailsById call returned an invalid JSON Object"); - } - - // Issue #658 - // If we are using a sync_list file, using deltaLink will actually 'miss' changes (moves & deletes) on OneDrive as using sync_list discards changes - // Use the performFullItemScan boolean to control whether we perform a full object scan of use the delta link for the root folder - // When using --synchronize the normal process order is: - // 1. Scan OneDrive for changes - // 2. Scan local folder for changes - // 3. 
Scan OneDrive for changes - // When using sync_list and performing a full scan, what this means is a full scan is performed twice, which leads to massive processing & time overheads - // Control this via performFullItemScan - - // Get the current delta link - deltaLinkAvailable = itemdb.getDeltaLink(driveId, id); - // if sync_list is not configured, syncListConfigured should be false - log.vdebug("syncListConfigured = ", syncListConfigured); - // oneDriveFullScanTrigger should be false unless set by actions on OneDrive and only if sync_list or skip_dir is used - log.vdebug("oneDriveFullScanTrigger = ", oneDriveFullScanTrigger); - // should only be set if 10th scan in monitor mode or as final true up sync in stand alone mode - log.vdebug("performFullItemScan = ", performFullItemScan); - - // do we override performFullItemScan if it is currently false and oneDriveFullScanTrigger is true? - if ((!performFullItemScan) && (oneDriveFullScanTrigger)) { - // forcing a full scan earlier than potentially normal - // oneDriveFullScanTrigger = true due to new folder creation request in a location that is now in-scope which was previously out of scope - performFullItemScan = true; - log.vdebug("overriding performFullItemScan as oneDriveFullScanTrigger was set"); - } - - // depending on the scan type (--monitor or --synchronize) performFullItemScan is set depending on the number of sync passes performed (--monitor) or ALWAYS if just --synchronize is used - if (!performFullItemScan){ - // performFullItemScan == false - // use delta link - log.vdebug("performFullItemScan is false, using the deltaLink as per database entry"); - if (deltaLinkAvailable == ""){ - deltaLink = ""; - log.vdebug("deltaLink was requested to be used, but contains no data - resulting API query will be treated as a full scan of OneDrive"); - } else { - deltaLink = deltaLinkAvailable; - log.vdebug("deltaLink contains valid data - resulting API query will be treated as a delta scan of OneDrive"); - } - } else { 
- // performFullItemScan == true - // do not use delta-link - deltaLink = ""; - log.vdebug("performFullItemScan is true, not using the database deltaLink so that we query all objects on OneDrive to compare against all local objects"); - } - - for (;;) { - - if (displayProcessingTime) { - writeln("------------------------------------------------------------"); - startBundleProcessingTime = Clock.currTime(); - writeln("Start 'change|item' API Response Bundle Processing Time: ", startBundleProcessingTime); - } - - // Due to differences in OneDrive API's between personal and business we need to get changes only from defaultRootId - // If we used the 'id' passed in & when using --single-directory with a business account we get: - // 'HTTP request returned status code 501 (Not Implemented): view.delta can only be called on the root.' - // To view changes correctly, we need to use the correct path id for the request - if (driveId == defaultDriveId) { - // The drive id matches our users default drive id - log.vdebug("Configuring 'idToQuery' as defaultRootId duplicate"); - idToQuery = defaultRootId.dup; - } else { - // The drive id does not match our users default drive id - // Potentially the 'path id' we are requesting the details of is a Shared Folder (remote item) - // Use the 'id' that was passed in (folderId) - log.vdebug("Configuring 'idToQuery' as 'id' duplicate"); - idToQuery = id.dup; } - // what path id are we going to query? - log.vdebug("Path object to query configured as 'idToQuery' = ", idToQuery); - long deltaChanges = 0; - // What query do we use? 
- // National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Are we running against a National Cloud Deployments that does not support /delta - if (nationalCloudDeployment) { - // National Cloud Deployment that does not support /delta query - // Have to query /children and build our own /delta response - nationalCloudChildrenScan = true; - log.vdebug("Using /children call to query drive for items to populate 'changes' and 'changesAvailable'"); - // In a OneDrive Business Shared Folder scenario + nationalCloudDeployment, if ALL items are downgraded, then this leads to local file deletion - // Downgrade ONLY files associated with this driveId and idToQuery - log.vdebug("Downgrading all children for this driveId (" ~ driveId ~ ") and idToQuery (" ~ idToQuery ~ ") to an out-of-sync state"); - - // Before we get any data, flag any object in the database as out-of-sync for this driveID & ID - auto drivePathChildren = itemdb.selectChildren(driveId, idToQuery); - if (count(drivePathChildren) > 0) { - // Children to process and flag as out-of-sync - foreach (drivePathChild; drivePathChildren) { - // Flag any object in the database as out-of-sync for this driveID & ID - log.vdebug("Downgrading item as out-of-sync: ", drivePathChild.id); - itemdb.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id); - } - } - - // Build own 'changes' response to simulate a /delta response - try { - // we have to 'build' our own JSON response that looks like /delta - changes = generateDeltaResponse(driveId, idToQuery); - if (changes.type() == JSONType.object) { - log.vdebug("Query 'changes = generateDeltaResponse(driveId, idToQuery)' performed successfully"); - } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changes = generateDeltaResponse(driveId, idToQuery)"); - 
log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive items"); - } - - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + // Check the item type - if it not an item type that we support, we cant process the JSON item + if (!unwanted) { + if (isItemFile(onedriveJSONItem)) { + log.vdebug("The item we are syncing is a file"); + } else if (isItemFolder(onedriveJSONItem)) { + log.vdebug("The item we are syncing is a folder"); + } else if (isItemRemote(onedriveJSONItem)) { + log.vdebug("The item we are syncing is a remote item"); + } else { + // Why was this unwanted? 
+ if (newItemPath.empty) { + // Compute this item path & need the full path for this file + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + log.vdebug("New Item calculated full path is: ", newItemPath); } - - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive items - retrying applicable request"); - log.vdebug("changes = generateDeltaResponse(driveId, idToQuery) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLink after delay"); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changes = generateDeltaResponse(driveId, idToQuery)"); - changes = generateDeltaResponse(driveId, idToQuery); - log.vdebug("Query 'changes = generateDeltaResponse(driveId, idToQuery)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changes = generateDeltaResponse(driveId, idToQuery) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + // Microsoft OneNote container objects present as neither folder or file but has file size + if ((!isItemFile(onedriveJSONItem)) && (!isItemFolder(onedriveJSONItem)) && (hasFileSize(onedriveJSONItem))) { + // Log that this was skipped as this was a Microsoft OneNote item and unsupported + log.vlog("The Microsoft OneNote Notebook '", newItemPath, "' is not supported by this client"); } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + // Log that this item was skipped as unsupported + log.vlog("The OneDrive item '", newItemPath, "' is not supported by this client"); } + unwanted = true; + log.vdebug("Flagging as unwanted: item type is not supported"); } - } else { - log.vdebug("Using /delta call to query drive for items to populate 'changes' and 'changesAvailable'"); - // query for changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - try { - // Fetch the changes relative to the path id we want to query - log.vdebug("Attempting query 'changes = 
onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)'"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("Previous deltaLink: ", deltaLink); - // changes with or without deltaLink - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - if (changes.type() == JSONType.object) { - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully"); - log.vdebug("OneDrive API /delta response: ", changes); - } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)"); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 410 (The requested resource is no longer available at the server) - if (e.httpStatusCode == 410) { - log.vdebug("Delta link expired for 'onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)', setting 'deltaLink = null'"); - deltaLink = null; - continue; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query changes from OneDrive using deltaLink"); - } - - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLink after delay"); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)"); - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) on re-try after delay"); - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) previously threw an error - retrying with empty deltaLink"); - try { - // try query with empty deltaLink value - deltaLink = null; - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // Tried 3 times, give up - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + } + + // Check if this is excluded by config option: skip_dir + if (!unwanted) { + // Only check path if config is != "" + if (!appConfig.getValueString("skip_dir").empty) { + // Is the item a folder? 
+ if (isItemFolder(onedriveJSONItem)) { + // work out the 'snippet' path where this folder would be created + string simplePathToCheck = ""; + string complexPathToCheck = ""; + string matchDisplay = ""; + + if (hasParentReference(onedriveJSONItem)) { + // we need to workout the FULL path for this item + // simple path + if (("name" in onedriveJSONItem["parentReference"]) != null) { + simplePathToCheck = onedriveJSONItem["parentReference"]["name"].str ~ "/" ~ onedriveJSONItem["name"].str; } else { - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + simplePathToCheck = onedriveJSONItem["name"].str; + } + log.vdebug("skip_dir path to check (simple): ", simplePathToCheck); + + // complex path + if (parentInDatabase) { + // build up complexPathToCheck + complexPathToCheck = buildNormalizedPath(newItemPath); + } else { + log.vdebug("Parent details not in database - unable to compute complex path to check"); } + if (!complexPathToCheck.empty) { + log.vdebug("skip_dir path to check (complex): ", complexPathToCheck); + } + } else { + simplePathToCheck = onedriveJSONItem["name"].str; } - } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // Issue #1174 handling where stored deltaLink is invalid - if ((e.httpStatusCode == 400) && (deltaLink != "")) { - // Set deltaLink to an empty entry so invalid URL is not reused - string emptyDeltaLink = ""; - itemdb.setDeltaLink(driveId, idToQuery, emptyDeltaLink); + + // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder + // then isDirNameExcluded matching will not work + // Clean up 'root:' if present + if (startsWith(simplePathToCheck, "root:")){ + log.vdebug("Updating simplePathToCheck to remove 'root:'"); + simplePathToCheck = strip(simplePathToCheck, "root:"); } - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - - // query for changesAvailable = 
onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable); - try { - // Fetch the changes relative to the path id we want to query - log.vdebug("Attempting query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)'"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("deltaLinkAvailable: ", deltaLinkAvailable); - // changes based on deltaLink - changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable); - if (changesAvailable.type() == JSONType.object) { - log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully"); - // are there any delta changes? - if (("value" in changesAvailable) != null) { - deltaChanges = count(changesAvailable["value"].array); - log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive"); + if (startsWith(complexPathToCheck, "root:")){ + log.vdebug("Updating complexPathToCheck to remove 'root:'"); + complexPathToCheck = strip(complexPathToCheck, "root:"); + } + + // OK .. what checks are we doing? 
+ if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) { + // just a simple check + log.vdebug("Performing a simple check only"); + unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); + } else { + // simple and complex + log.vdebug("Performing a simple then complex path match if required"); + // simple first + log.vdebug("Performing a simple check first"); + unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); + matchDisplay = simplePathToCheck; + if (!unwanted) { + log.vdebug("Simple match was false, attempting complex match"); + // simple didnt match, perform a complex check + unwanted = selectiveSync.isDirNameExcluded(complexPathToCheck); + matchDisplay = complexPathToCheck; + } + } + // result + log.vdebug("skip_dir exclude result (directory based): ", unwanted); + if (unwanted) { + // This path should be skipped + log.vlog("Skipping item - excluded by skip_dir config: ", matchDisplay); } } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)"); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 410 (The requested resource is no longer available at the server) - if (e.httpStatusCode == 410) { - log.vdebug("Delta link expired for 'onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)', setting 'deltaLinkAvailable = null'"); - deltaLinkAvailable = null; - continue; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned 
status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query changes from OneDrive using deltaLinkAvailable"); - } - - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + // Is the item a file? + // We need to check to see if this files path is excluded as well + if (isItemFile(onedriveJSONItem)) { - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLinkAvailable after delay"); + string pathToCheck; + // does the newItemPath start with '/'? 
+ if (!startsWith(newItemPath, "/")){ + // path does not start with '/', but we need to check skip_dir entries with and without '/' + // so always make sure we are checking a path with '/' + pathToCheck = '/' ~ dirName(newItemPath); + } else { + pathToCheck = dirName(newItemPath); } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)"); - changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable); - log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully on re-try"); - if (changesAvailable.type() == JSONType.object) { - // are there any delta changes? - if (("value" in changesAvailable) != null) { - deltaChanges = count(changesAvailable["value"].array); - log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive"); - } - } - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) on re-try after delay"); - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) previously threw an error - retrying with empty deltaLinkAvailable"); - // Increase delay and wait again before retry - log.vdebug("Thread sleeping for 90 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(90)); - log.vdebug("Retrying Query - using a null deltaLinkAvailable after delay"); - try { - // try query with empty deltaLinkAvailable value - deltaLinkAvailable = null; - 
changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable); - log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully on re-try"); - if (changesAvailable.type() == JSONType.object) { - // are there any delta changes? - if (("value" in changesAvailable) != null) { - deltaChanges = count(changesAvailable["value"].array); - log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive when using a null deltaLink value"); - } - } - } catch (OneDriveException e) { - // Tried 3 times, give up - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // OK .. if this was a 504, and running with --download-only & --cleanup-local-files - // need to exit to preserve local data, otherwise potential files will be deleted that should not be deleted - // leading to undesirable potential data loss scenarios - if ((e.httpStatusCode == 504) && (cleanupLocalFiles)) { - // log why we are exiting - log.log("Exiting application due to OneDrive API Gateway Timeout & --download-only & --cleanup-local-files configured to preserve local data"); - // Must exit here - onedrive.shutdown(); - exit(-1); - } - return; - } - } else { - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + + // perform the check + unwanted = selectiveSync.isDirNameExcluded(pathToCheck); + // result + log.vdebug("skip_dir exclude result (file based): ", unwanted); + if (unwanted) { + // this files path should be skipped + log.vlog("Skipping item - file path is excluded by skip_dir config: ", newItemPath); } - } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; } } } - // In some OneDrive Business scenarios, the shared folder /delta response lacks the 'root' drive details - // 
When this occurs, this creates the following error: A database statement execution error occurred: foreign key constraint failed - // Ensure we query independently the root details for this shared folder and ensure that it is added before we process the /delta response - - // However, if we are using a National Cloud Deployment, these deployments do not support /delta, so we generate a /delta response via generateDeltaResponse() - // This specifically adds the root drive details to the self generated /delta response - if ((!nationalCloudDeployment) && (driveId!= defaultDriveId) && (syncBusinessFolders)) { - // fetch this driveId root details to ensure we add this to the database for this remote drive - JSONValue rootData; - - try { - rootData = onedrive.getDriveIdRoot(driveId); - } catch (OneDriveException e) { - log.vdebug("rootData = onedrive.getDriveIdRoot(driveId) generated a OneDriveException"); - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - if (e.httpStatusCode == 429) { - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request"); - handleOneDriveThrottleRequest(); + // Check if this is excluded by config option: skip_file + if (!unwanted) { + // Is the JSON item a file? + if (isItemFile(onedriveJSONItem)) { + // skip_file can contain 4 types of entries: + // - wildcard - *.txt + // - text + wildcard - name*.txt + // - full path + combination of any above two - /path/name*.txt + // - full path to file - /path/to/file.txt + + // is the parent id in the database? 
+ if (parentInDatabase) { + // Compute this item path & need the full path for this file + if (newItemPath.empty) { + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + log.vdebug("New Item calculated full path is: ", newItemPath); } - if (e.httpStatusCode == 504) { - log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request"); - Thread.sleep(dur!"seconds"(30)); + + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched + // However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks + string exclusionTestPath = ""; + if (!startsWith(newItemPath, "/")){ + // Add '/' to the path + exclusionTestPath = '/' ~ newItemPath; } - // Retry original request by calling function again to avoid replicating any further error handling - rootData = onedrive.getDriveIdRoot(driveId); + log.vdebug("skip_file item to check: ", exclusionTestPath); + unwanted = selectiveSync.isFileNameExcluded(exclusionTestPath); + log.vdebug("Result: ", unwanted); + if (unwanted) log.vlog("Skipping item - excluded by skip_file config: ", thisItemName); } else { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + // parent id is not in the database + unwanted = true; + log.vlog("Skipping file - parent path not present in local database"); } } - - // apply this root drive data - applyDifference(rootData, driveId, true); } - // Process /delta response from OneDrive - // is changes a valid JSON response - if (changes.type() == JSONType.object) { - // Are there any changes to process? 
- if ((("value" in changes) != null) && ((deltaChanges > 0) || (oneDriveFullScanTrigger) || (nationalCloudChildrenScan) || (syncBusinessFolders) )) { - auto nrChanges = count(changes["value"].array); - auto changeCount = 0; - - // Display the number of changes or OneDrive objects we are processing - // OneDrive ships 'changes' in ~200 bundles. We display that we are processing X number of objects - // Do not display anything unless we are doing a verbose debug as due to #658 we are essentially doing a --resync each time when using sync_list - - // performance logging output - if (displayProcessingTime) { - writeln("Number of 'change|item' in this API Response Bundle from OneDrive to process: ", nrChanges); + // Check if this is included or excluded by use of sync_list + if (!unwanted) { + // No need to try and process something against a sync_list if it has been configured + if (syncListConfigured) { + // Compute the item path if empty - as to check sync_list we need an actual path to check + if (newItemPath.empty) { + // Calculate this items path + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + log.vdebug("New Item calculated full path is: ", newItemPath); } - // is nrChanges >= min_notify_changes (default of min_notify_changes = 5) - if (nrChanges >= cfg.getValueLong("min_notify_changes")) { - // nrChanges is >= than min_notify_changes - // verbose log, no 'notify' .. 
it is over the top - if (!syncListConfigured) { - // sync_list is not being used - lets use the right messaging here - if (oneDriveFullScanTrigger) { - // full scan was triggered out of cycle - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being triggered by actions on OneDrive"); - // unset now the full scan trigger if set - unsetOneDriveFullScanTrigger(); - } else { - // no sync_list in use, oneDriveFullScanTrigger not set via sync_list or skip_dir - if (performFullItemScan){ - // performFullItemScan was set - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being requested"); - } else { - // default processing message - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state"); - } - } + // What path are we checking? + log.vdebug("sync_list item to check: ", newItemPath); + + // Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list + if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { + // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? + if ((isItemFile(onedriveJSONItem)) && (appConfig.getValueBool("sync_root_files")) && (rootName(newItemPath) == "") ) { + // This is a file + // We are configured to sync all files in the root + // This is a file in the logical root + unwanted = false; } else { - // sync_list is being used - why are we going through the entire OneDrive contents? 
- log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to sync_list being used"); - } - } else { - // There are valid changes but less than the min_notify_changes configured threshold - // We will only output the number of changes being processed to debug log if this is set to assist with debugging - // As this is debug logging, messaging can be the same, regardless of sync_list being used or not - - // is performFullItemScan set due to a full scan required? - // is oneDriveFullScanTrigger set due to a potentially out-of-scope item now being in-scope - if ((performFullItemScan) || (oneDriveFullScanTrigger)) { - // oneDriveFullScanTrigger should be false unless set by actions on OneDrive and only if sync_list or skip_dir is used - log.vdebug("performFullItemScan or oneDriveFullScanTrigger = true"); - // full scan was requested or triggered - // use the right message - if (oneDriveFullScanTrigger) { - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being triggered by actions on OneDrive"); - // unset now the full scan trigger if set - unsetOneDriveFullScanTrigger(); - } else { - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being requested"); + // path is unwanted + unwanted = true; + log.vlog("Skipping item - excluded by sync_list config: ", newItemPath); + // flagging to skip this item now, but does this exist in the DB thus needs to be removed / deleted? 
+ if (existingDBEntry) { + // flag to delete + log.vlog("Flagging item for local delete as item exists in database: ", newItemPath); + idsToDelete ~= [thisItemDriveId, thisItemId]; } - } else { - // standard message - log.vlog("Number of items from OneDrive to process: ", nrChanges); } } - - // Add nrChanges to cumulativeOneDriveItemCount so we can detail how may items in total were processed - cumulativeOneDriveItemCount = cumulativeOneDriveItemCount + nrChanges; - - foreach (item; changes["value"].array) { - bool isRoot = false; - string thisItemParentPath; - string thisItemFullPath; - changeCount++; - - // Change as reported by OneDrive - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Processing change ", changeCount, " of ", nrChanges); - log.vdebug("OneDrive Change: ", item); - - // Deleted items returned from onedrive.viewChangesByItemId or onedrive.viewChangesByDriveId (/delta) do not have a 'name' attribute - // Thus we cannot name check for 'root' below on deleted items - if(!isItemDeleted(item)){ - // This is not a deleted item - log.vdebug("Not a OneDrive deleted item change"); - // Test is this is the OneDrive Users Root? 
- // Debug output of change evaluation items - log.vdebug("defaultRootId = ", defaultRootId); - log.vdebug("'search id' = ", id); - log.vdebug("id == defaultRootId = ", (id == defaultRootId)); - log.vdebug("isItemRoot(item) = ", (isItemRoot(item))); - log.vdebug("item['name'].str == 'root' = ", (item["name"].str == "root")); - log.vdebug("singleDirectoryScope = ", (singleDirectoryScope)); - - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - // In a --single-directory scenario however, '(id == defaultRootId) = false' for root items - if ( ((id == defaultRootId) || (singleDirectoryScope)) && (isItemRoot(item)) && (item["name"].str == "root")) { - // This IS a OneDrive Root item - log.vdebug("Change will flagged as a 'root' item change"); - isRoot = true; - } - } - - // How do we handle this change? - if (isRoot || !hasParentReferenceId(item) || isItemDeleted(item)){ - // Is a root item, has no id in parentReference or is a OneDrive deleted item - log.vdebug("isRoot = ", isRoot); - log.vdebug("!hasParentReferenceId(item) = ", (!hasParentReferenceId(item))); - log.vdebug("isItemDeleted(item) = ", (isItemDeleted(item))); - log.vdebug("Handling change as 'root item', has no parent reference or is a deleted item"); - applyDifference(item, driveId, isRoot); - } else { - // What is this item's parent path? 
- if (hasParentReferencePath(item)) { - thisItemParentPath = item["parentReference"]["path"].str; - thisItemFullPath = thisItemParentPath ~ "/" ~ item["name"].str; - } else { - thisItemParentPath = ""; - } - - // Special case handling flags - bool singleDirectorySpecialCase = false; - bool sharedFoldersSpecialCase = false; - - // Debug output of change evaluation items - log.vdebug("'parentReference id' = ", item["parentReference"]["id"].str); - log.vdebug("search criteria: syncFolderName = ", syncFolderName); - log.vdebug("search criteria: syncFolderPath = ", syncFolderPath); - log.vdebug("search criteria: syncFolderChildPath = ", syncFolderChildPath); - log.vdebug("thisItemId = ", item["id"].str); - log.vdebug("thisItemParentPath = ", thisItemParentPath); - log.vdebug("thisItemFullPath = ", thisItemFullPath); - log.vdebug("'item id' matches search 'id' = ", (item["id"].str == id)); - log.vdebug("'parentReference id' matches search 'id' = ", (item["parentReference"]["id"].str == id)); - log.vdebug("'thisItemParentPath' contains 'syncFolderChildPath' = ", (canFind(thisItemParentPath, syncFolderChildPath))); - log.vdebug("'thisItemParentPath' contains search 'id' = ", (canFind(thisItemParentPath, id))); - - // Special case handling - --single-directory - // If we are in a --single-directory sync scenario, and, the DB does not contain any parent details, or --single-directory is used with --resync - // all changes will be discarded as 'Remote change discarded - not in --single-directory sync scope (not in DB)' even though, some of the changes - // are actually valid and required as they are part of the parental path - if (singleDirectoryScope){ - // What is the full path for this item from OneDrive - log.vdebug("'syncFolderChildPath' contains 'thisItemFullPath' = ", (canFind(syncFolderChildPath, thisItemFullPath))); - if (canFind(syncFolderChildPath, thisItemFullPath)) { - singleDirectorySpecialCase = true; - } - } - - // Special case handling - Shared Business 
Folders - // - IF we are syncing shared folders, and the shared folder is not the 'top level' folder being shared out - // canFind(thisItemParentPath, syncFolderChildPath) will never match: - // Syncing this OneDrive Business Shared Folder: MyFolderName - // OneDrive Business Shared By: Firstname Lastname (email@address) - // Applying changes of Path ID: pathId - // [DEBUG] Sync Folder Name: MyFolderName - // [DEBUG] Sync Folder Path: /drives/driveId/root:/TopLevel/ABCD - // [DEBUG] Sync Folder Child Path: /drives/driveId/root:/TopLevel/ABCD/MyFolderName/ - // ... - // [DEBUG] 'item id' matches search 'id' = false - // [DEBUG] 'parentReference id' matches search 'id' = false - // [DEBUG] 'thisItemParentPath' contains 'syncFolderChildPath' = false - // [DEBUG] 'thisItemParentPath' contains search 'id' = false - // [DEBUG] Change does not match any criteria to apply - // Remote change discarded - not in business shared folders sync scope - - if ((!canFind(thisItemParentPath, syncFolderChildPath)) && (syncBusinessFolders)) { - // Syncing Shared Business folders & we dont have a path match - // is this a reverse path match? - log.vdebug("'thisItemParentPath' contains 'syncFolderName' = ", (canFind(thisItemParentPath, syncFolderName))); - if (canFind(thisItemParentPath, syncFolderName)) { - sharedFoldersSpecialCase = true; - } - } - - // Check this item's path to see if this is a change on the path we want: - // 1. 'item id' matches 'id' - // 2. 'parentReference id' matches 'id' - // 3. 'item path' contains 'syncFolderChildPath' - // 4. 'item path' contains 'id' - // 5. 
Special Case was triggered - if ( (item["id"].str == id) || (item["parentReference"]["id"].str == id) || (canFind(thisItemParentPath, syncFolderChildPath)) || (canFind(thisItemParentPath, id)) || (singleDirectorySpecialCase) || (sharedFoldersSpecialCase) ){ - // This is a change we want to apply - if ((!singleDirectorySpecialCase) && (!sharedFoldersSpecialCase)) { - log.vdebug("Change matches search criteria to apply"); - } else { - if (singleDirectorySpecialCase) log.vdebug("Change matches search criteria to apply - special case criteria - reverse path matching used (--single-directory)"); - if (sharedFoldersSpecialCase) log.vdebug("Change matches search criteria to apply - special case criteria - reverse path matching used (Shared Business Folders)"); - } - // Apply OneDrive change - applyDifference(item, driveId, isRoot); - } else { - // No item ID match or folder sync match - log.vdebug("Change does not match any criteria to apply"); - - // Before discarding change - does this ID still exist on OneDrive - as in IS this - // potentially a --single-directory sync and the user 'moved' the file out of the 'sync-dir' to another OneDrive folder - // This is a corner edge case - https://github.com/skilion/onedrive/issues/341 - - // What is the original local path for this ID in the database? Does it match 'syncFolderChildPath' - if (itemdb.idInLocalDatabase(driveId, item["id"].str)){ - // item is in the database - string originalLocalPath = computeItemPath(driveId, item["id"].str); - - if (canFind(originalLocalPath, syncFolderChildPath)){ - JSONValue oneDriveMovedNotDeleted; - try { - oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item["id"].str); - } catch (OneDriveException e) { - log.vdebug("oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item['id'].str); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // No .. 
that ID is GONE - log.vlog("Remote change discarded - item cannot be found"); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry request after delay - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item['id'].str);"); - try { - oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item["id"].str); - } catch (OneDriveException e) { - // A further error was generated - // Rather than retry original function, retry the actual call and replicate error handling - if (e.httpStatusCode == 404) { - // No .. that ID is GONE - log.vlog("Remote change discarded - item cannot be found"); - } else { - // not a 404 - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } - } else { - // not a 404 or a 429 - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } - - // Yes .. ID is still on OneDrive but elsewhere .... 
#341 edge case handling - // This 'change' relates to an item that WAS in 'syncFolderChildPath' but is now - // stored elsewhere on OneDrive - outside the path we are syncing from - // Remove this item locally as it's local path is now obsolete - idsToDelete ~= [driveId, item["id"].str]; - } else { - // out of scope for some other reason - if (singleDirectoryScope){ - log.vlog("Remote change discarded - not in --single-directory sync scope (in DB)"); - } else { - log.vlog("Remote change discarded - not in sync scope"); - } - log.vdebug("Remote change discarded: ", item); - } - } else { - // item is not in the database - if (singleDirectoryScope){ - // We are syncing a single directory, so this is the reason why it is out of scope - log.vlog("Remote change discarded - not in --single-directory sync scope (not in DB)"); - log.vdebug("Remote change discarded: ", item); - } else { - // Not a single directory sync - if (syncBusinessFolders) { - // if we are syncing shared business folders, a 'change' may be out of scope as we are not syncing that 'folder' - // but we are sent all changes from the 'parent root' as we cannot query the 'delta' for this folder - // as that is a 501 error - not implemented - log.vlog("Remote change discarded - not in business shared folders sync scope"); - log.vdebug("Remote change discarded: ", item); - } else { - // out of scope for some other reason - log.vlog("Remote change discarded - not in sync scope"); - log.vdebug("Remote change discarded: ", item); - } - } - } - } - } - } - } else { - // No changes reported on OneDrive - log.vdebug("OneDrive Reported no delta changes - Local path and OneDrive in-sync"); - } - - // the response may contain either @odata.deltaLink or @odata.nextLink - if ("@odata.deltaLink" in changes) { - deltaLink = changes["@odata.deltaLink"].str; - log.vdebug("Setting next deltaLink to (@odata.deltaLink): ", deltaLink); - } - if (deltaLink != "") { - // we initialise deltaLink to a blank string - if it is blank, 
dont update the DB to be empty - log.vdebug("Updating completed deltaLink in DB to: ", deltaLink); - itemdb.setDeltaLink(driveId, id, deltaLink); - } - - // Processing Timing for this bundle - if (displayProcessingTime) { - endBundleProcessingTime = Clock.currTime(); - writeln("End 'change|item' API Response Bundle Processing Time: ", endBundleProcessingTime); - writeln("Elapsed Processing Time: ", (endBundleProcessingTime - startBundleProcessingTime)); - } - - if ("@odata.nextLink" in changes) { - // Update deltaLink to next changeSet bundle - deltaLink = changes["@odata.nextLink"].str; - // Update deltaLinkAvailable to next changeSet bundle to quantify how many changes we have to process - deltaLinkAvailable = changes["@odata.nextLink"].str; - log.vdebug("Setting next deltaLink & deltaLinkAvailable to (@odata.nextLink): ", deltaLink); - } - else break; - } else { - // Log that an invalid JSON object was returned - if ((driveId == defaultDriveId) || (!syncBusinessFolders)) { - log.vdebug("onedrive.viewChangesByItemId call returned an invalid JSON Object"); - } else { - log.vdebug("onedrive.viewChangesByDriveId call returned an invalid JSON Object"); - } - } - } - - // delete items in idsToDelete - if (idsToDelete.length > 0) deleteItems(); - // empty the skipped items - skippedItems.length = 0; - assumeSafeAppend(skippedItems); - - // Processing timing and metrics for everything that was processed - if (displayProcessingTime) { - endFunctionProcessingTime = Clock.currTime(); - // complete the bundle output - writeln("------------------------------------------------------------"); - writeln("Start Function Processing Time: ", startFunctionProcessingTime); - writeln("End Function Processing Time: ", endFunctionProcessingTime); - writeln("Elapsed Function Processing Time: ", (endFunctionProcessingTime - startFunctionProcessingTime)); - writeln("Total number of OneDrive items processed: ", cumulativeOneDriveItemCount); - 
writeln("============================================================"); - } - } - - // process the change of a single DriveItem - private void applyDifference(JSONValue driveItem, string driveId, bool isRoot) - { - // Format the OneDrive change into a consumable object for the database - Item item = makeItem(driveItem); - - // Reset the malwareDetected flag for this item - malwareDetected = false; - - // Reset the downloadFailed flag for this item - downloadFailed = false; - - // Path we will be using - string path = ""; - - if(isItemDeleted(driveItem)){ - // Change is to delete an item - log.vdebug("Remote deleted item"); - } else { - // Is the change from OneDrive a 'root' item - // The change should be considered a 'root' item if: - // 1. Contains a ["root"] element - // 2. Has no ["parentReference"]["id"] ... #323 & #324 highlighted that this is false as some 'root' shared objects now can have an 'id' element .. OneDrive API change - // 2. Has no ["parentReference"]["path"] - // 3. Was detected by an input flag as to be handled as a root item regardless of actual status - if (isItemRoot(driveItem) || !hasParentReferencePath(driveItem) || isRoot) { - log.vdebug("Handing a OneDrive 'root' change"); - item.parentId = null; // ensures that it has no parent - item.driveId = driveId; // HACK: makeItem() cannot set the driveId property of the root - log.vdebug("Update/Insert local database with item details"); - itemdb.upsert(item); - log.vdebug("item details: ", item); - return; - } - } - - bool unwanted; - // Check if the parent id is something we need to skip - if (skippedItems.find(item.parentId).length != 0) { - // Potentially need to flag as unwanted - log.vdebug("Flagging as unwanted: find(item.parentId).length != 0"); - unwanted = true; - - // Is this item id in the database? 
- if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - // item exists in database, most likely moved out of scope for current client configuration - log.vdebug("This item was previously synced / seen by the client"); - if (("name" in driveItem["parentReference"]) != null) { - // How is this out of scope? - // is sync_list configured - if (syncListConfigured) { - // sync_list configured and in use - if (selectiveSync.isPathExcludedViaSyncList(driveItem["parentReference"]["name"].str)) { - // Previously synced item is now out of scope as it has been moved out of what is included in sync_list - log.vdebug("This previously synced item is now excluded from being synced due to sync_list exclusion"); - } - } - // flag to delete local file as it now is no longer in sync with OneDrive - log.vdebug("Flagging to delete item locally"); - idsToDelete ~= [item.driveId, item.id]; - } - } - } - - // Check if this is excluded by config option: skip_dir - if (!unwanted) { - // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { - // Is the item a folder and not a deleted item? 
- if ((isItemFolder(driveItem)) && (!isItemDeleted(driveItem))) { - // work out the 'snippet' path where this folder would be created - string simplePathToCheck = ""; - string complexPathToCheck = ""; - string matchDisplay = ""; - - if (hasParentReference(driveItem)) { - // we need to workout the FULL path for this item - string parentDriveId = driveItem["parentReference"]["driveId"].str; - string parentItem = driveItem["parentReference"]["id"].str; - // simple path - if (("name" in driveItem["parentReference"]) != null) { - simplePathToCheck = driveItem["parentReference"]["name"].str ~ "/" ~ driveItem["name"].str; - } else { - simplePathToCheck = driveItem["name"].str; - } - log.vdebug("skip_dir path to check (simple): ", simplePathToCheck); - // complex path - if (itemdb.idInLocalDatabase(parentDriveId, parentItem)){ - // build up complexPathToCheck - complexPathToCheck = computeItemPath(parentDriveId, parentItem) ~ "/" ~ driveItem["name"].str; - complexPathToCheck = buildNormalizedPath(complexPathToCheck); - } else { - log.vdebug("Parent details not in database - unable to compute complex path to check"); - } - log.vdebug("skip_dir path to check (complex): ", complexPathToCheck); - } else { - simplePathToCheck = driveItem["name"].str; - } - - // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder - // then isDirNameExcluded matching will not work - // Clean up 'root:' if present - if (startsWith(simplePathToCheck, "root:")){ - log.vdebug("Updating simplePathToCheck to remove 'root:'"); - simplePathToCheck = strip(simplePathToCheck, "root:"); - } - if (startsWith(complexPathToCheck, "root:")){ - log.vdebug("Updating complexPathToCheck to remove 'root:'"); - complexPathToCheck = strip(complexPathToCheck, "root:"); - } - - // OK .. what checks are we doing? 
- if ((simplePathToCheck != "") && (complexPathToCheck == "")) { - // just a simple check - log.vdebug("Performing a simple check only"); - unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); - } else { - // simple and complex - log.vdebug("Performing a simple & complex path match if required"); - // simple first - unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); - matchDisplay = simplePathToCheck; - if (!unwanted) { - log.vdebug("Simple match was false, attempting complex match"); - // simple didnt match, perform a complex check - unwanted = selectiveSync.isDirNameExcluded(complexPathToCheck); - matchDisplay = complexPathToCheck; + } + } + + // Check if the user has configured to skip downloading .files or .folders: skip_dotfiles + if (!unwanted) { + if (appConfig.getValueBool("skip_dotfiles")) { + if (isDotFile(newItemPath)) { + log.vlog("Skipping item - .file or .folder: ", newItemPath); + unwanted = true; + } + } + } + + // Check if this should be skipped due to a --check-for-nosync directive (.nosync)? 
+ if (!unwanted) { + if (appConfig.getValueBool("check_nosync")) { + // need the parent path for this object + string parentPath = dirName(newItemPath); + // Check for the presence of a .nosync in the parent path + if (exists(parentPath ~ "/.nosync")) { + log.vlog("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: ", newItemPath); + unwanted = true; + } + } + } + + // Check if this is excluded by a user set maximum filesize to download + if (!unwanted) { + if (isItemFile(onedriveJSONItem)) { + if (fileSizeLimit != 0) { + if (onedriveJSONItem["size"].integer >= fileSizeLimit) { + log.vlog("Skipping item - excluded by skip_size config: ", thisItemName, " (", onedriveJSONItem["size"].integer/2^^20, " MB)"); } } - - log.vdebug("Result: ", unwanted); - if (unwanted) log.vlog("Skipping item - excluded by skip_dir config: ", matchDisplay); } } - } - - // Check if this is excluded by config option: skip_file - if (!unwanted) { - // Is the item a file and not a deleted item? - if ((isItemFile(driveItem)) && (!isItemDeleted(driveItem))) { - // skip_file can contain 4 types of entries: - // - wildcard - *.txt - // - text + wildcard - name*.txt - // - full path + combination of any above two - /path/name*.txt - // - full path to file - /path/to/file.txt - - // is the parent id in the database? 
- if (itemdb.idInLocalDatabase(item.driveId, item.parentId)){ - // Compute this item path & need the full path for this file - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - - // The path that needs to be checked needs to include the '/' - // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - // However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks - string exclusionTestPath = ""; - if (!startsWith(path, "/")){ - // Add '/' to the path - exclusionTestPath = '/' ~ path; - } - - log.vdebug("skip_file item to check: ", exclusionTestPath); - unwanted = selectiveSync.isFileNameExcluded(exclusionTestPath); - log.vdebug("Result: ", unwanted); - if (unwanted) log.vlog("Skipping item - excluded by skip_file config: ", item.name); - } else { - // parent id is not in the database - unwanted = true; - log.vlog("Skipping file - parent path not present in local database"); + + // At this point all the applicable checks on this JSON object from OneDrive are complete: + // - skip_file + // - skip_dir + // - sync_list + // - skip_dotfiles + // - check_nosync + // - skip_size + // - We know if this item exists in the DB or not in the DB + + // We know if this JSON item is unwanted or not + if (unwanted) { + // This JSON item is NOT wanted - it is excluded + log.vdebug("Skipping OneDrive change as this is determined to be unwanted"); + // Add to the skippedItems array, but only if it is a directory ... 
pointless adding 'files' here, as it is the 'id' we check as the parent path which can only be a directory + if (!isItemFile(onedriveJSONItem)) { + skippedItems.insert(thisItemId); } - } - } - - // check the item type - if (!unwanted) { - if (isItemFile(driveItem)) { - log.vdebug("The item we are syncing is a file"); - } else if (isItemFolder(driveItem)) { - log.vdebug("The item we are syncing is a folder"); - } else if (isItemRemote(driveItem)) { - log.vdebug("The item we are syncing is a remote item"); - assert(isItemFolder(driveItem["remoteItem"]), "The remote item is not a folder"); } else { - // Why was this unwanted? - if (path.empty) { - // Compute this item path & need the full path for this file - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - } - // Microsoft OneNote container objects present as neither folder or file but has file size - if ((!isItemFile(driveItem)) && (!isItemFolder(driveItem)) && (hasFileSize(driveItem))) { - // Log that this was skipped as this was a Microsoft OneNote item and unsupported - log.vlog("The Microsoft OneNote Notebook '", path, "' is not supported by this client"); + // This JSON item is wanted - we need to process this JSON item further + // Take the JSON item and create a consumable object for eventual database insertion + Item newDatabaseItem = makeItem(onedriveJSONItem); + + if (existingDBEntry) { + // The details of this JSON item are already in the DB + // Is the item in the DB the same as the JSON data provided - or is the JSON data advising this is an updated file? 
+ log.vdebug("OneDrive change is an update to an existing local item"); + // Compute the existing item path + // NOTE: + // string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.id); + // + // This will calculate the path as follows: + // + // existingItemPath: Document.txt + // + // Whereas above we use the following + // + // newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; + // + // Which generates the following path: + // + // changedItemPath: ./Document.txt + // + // Need to be consistent here with how 'newItemPath' was calculated + string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; + // Attempt to apply this changed item + applyPotentiallyChangedItem(existingDatabaseItem, existingItemPath, newDatabaseItem, newItemPath, onedriveJSONItem); } else { - // Log that this item was skipped as unsupported - log.vlog("The OneDrive item '", path, "' is not supported by this client"); + // Action this JSON item as a new item as we have no DB record of it + // The actual item may actually exist locally already, meaning that just the database is out-of-date or missing the data due to --resync + // But we also cannot compute the newItemPath as the parental objects may not exist as well + log.vdebug("OneDrive change is potentially a new local item"); + + // Attempt to apply this potentially new item + applyPotentiallyNewLocalItem(newDatabaseItem, onedriveJSONItem, newItemPath); } - unwanted = true; - log.vdebug("Flagging as unwanted: item type is not supported"); } + + // Tracking as to if this item was processed + processedCount++; } - - // Check if this is included by use of sync_list - if (!unwanted) { - // Is the item parent in the local database? 
- if (itemdb.idInLocalDatabase(item.driveId, item.parentId)){ - // parent item is in the local database - // compute the item path if empty - if (path.empty) { - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - } - // what path are we checking - log.vdebug("sync_list item to check: ", path); - - // Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list - if (selectiveSync.isPathExcludedViaSyncList(path)) { - // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? - if ((isItemFile(driveItem)) && (cfg.getValueBool("sync_root_files")) && (rootName(path) == "") ) { - // This is a file - // We are configured to sync all files in the root - // This is a file in the logical root - unwanted = false; - } else { - // path is unwanted - unwanted = true; - log.vlog("Skipping item - excluded by sync_list config: ", path); - // flagging to skip this file now, but does this exist in the DB thus needs to be removed / deleted? - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - log.vlog("Flagging item for local delete as item exists in database: ", path); - // flag to delete - idsToDelete ~= [item.driveId, item.id]; - } - } - } - } else { - // Parent not in the database - // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? - if (defaultDriveId == item.driveId){ - // Flagging as unwanted - log.vdebug("Flagging as unwanted: item.driveId (", item.driveId,"), item.parentId (", item.parentId,") not in local database"); - unwanted = true; + } + + // Perform the download of any required objects in parallel + void processDownloadActivities() { + + // Are there any items to delete locally? 
Cleanup space locally first + if (!idsToDelete.empty) { + // There are elements that potentially need to be deleted locally + log.vlog("Items to potentially delete locally: ", idsToDelete.length); + if (appConfig.getValueBool("download_only")) { + // Download only has been configured + if (cleanupLocalFiles) { + // Process online deleted items + log.vlog("Processing local deletion activity as --download-only & --cleanup-local-files configured"); + processDeleteItems(); } else { - // Edge case as the parent (from another users OneDrive account) will never be in the database - log.vdebug("The reported parentId is not in the database. This potentially is a shared folder as 'item.driveId' != 'defaultDriveId'. Relevant Details: item.driveId (", item.driveId,"), item.parentId (", item.parentId,")"); - // If we are syncing OneDrive Business Shared Folders, a 'folder' shared with us, has a 'parent' that is not shared with us hence the above message - // What we need to do is query the DB for this 'item.driveId' and use the response from the DB to set the 'item.parentId' for this new item we are trying to add to the database - if (syncBusinessFolders) { - foreach(dbItem; itemdb.selectByDriveId(item.driveId)) { - if (dbItem.name == "root") { - // Ensure that this item uses the root id as parent - log.vdebug("Falsifying item.parentId to be ", dbItem.id); - item.parentId = dbItem.id; - } - } - } else { - // Ensure that this item has no parent - log.vdebug("Setting item.parentId to be null"); - item.parentId = null; - } - log.vdebug("Update/Insert local database with item details"); - itemdb.upsert(item); - log.vdebug("item details: ", item); - return; + // Not cleaning up local files + log.vlog("Skipping local deletion activity as --download-only has been used"); } - } - } - - // skip downloading dot files if configured - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(path)) { - log.vlog("Skipping item - .file or .folder: ", path); - unwanted = true; - } - } - - // 
skip unwanted items early - if (unwanted) { - log.vdebug("Skipping OneDrive change as this is determined to be unwanted"); - skippedItems ~= item.id; - return; - } - - // check if the item has been seen before - Item oldItem; - bool cached = itemdb.selectById(item.driveId, item.id, oldItem); - - // check if the item is going to be deleted - if (isItemDeleted(driveItem)) { - // item.name is not available, so we get a bunch of meaningless log output - // Item name we will attempt to delete will be printed out later - if (cached) { - // flag to delete - log.vdebug("Flagging item for deletion: ", item); - idsToDelete ~= [item.driveId, item.id]; } else { - // flag to ignore - log.vdebug("Flagging item to skip: ", item); - skippedItems ~= item.id; - } - return; - } - - // rename the local item if it is unsynced and there is a new version of it on OneDrive - string oldPath; - if (cached && item.eTag != oldItem.eTag) { - // Is the item in the local database - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - log.vdebug("OneDrive item ID is present in local database"); - // Compute this item path - oldPath = computeItemPath(item.driveId, item.id); - // Query DB for existing local item in specified path - string itemSource = "database"; - if (!isItemSynced(oldItem, oldPath, itemSource)) { - if (exists(oldPath)) { - // Is the local file technically 'newer' based on UTC timestamp? 
- SysTime localModifiedTime = timeLastModified(oldPath).toUTC(); - localModifiedTime.fracSecs = Duration.zero; - item.mtime.fracSecs = Duration.zero; - - // debug the output of time comparison - log.vdebug("localModifiedTime (local file): ", localModifiedTime); - log.vdebug("item.mtime (OneDrive item): ", item.mtime); - - // Compare file on disk modified time with modified time provided by OneDrive API - if (localModifiedTime >= item.mtime) { - // local file is newer or has the same time than the item on OneDrive - log.vdebug("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer or equal to item modified time from OneDrive"); - // no local rename - // no download needed - if (localModifiedTime == item.mtime) { - log.vlog("Local item modified time is equal to OneDrive item modified time based on UTC time conversion - keeping local item"); - } else { - log.vlog("Local item modified time is newer than OneDrive item modified time based on UTC time conversion - keeping local item"); - } - skippedItems ~= item.id; - return; - } else { - // remote file is newer than local item - log.vlog("Remote item modified time is newer based on UTC time conversion"); // correct message, remote item is newer - auto ext = extension(oldPath); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - - // has the user configured to IGNORE local data protection rules? - if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. 
You may experience data loss on this file: ", oldPath); - } else { - // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", oldPath, " -> ", newPath); - - // perform the rename action - if (!dryRun) { - safeRename(oldPath); - } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist - log.vdebug("DRY-RUN: Skipping local file rename"); - } - } - } - } - cached = false; - } + // Not using --download-only process normally + processDeleteItems(); } + // Cleanup array memory + idsToDelete = []; } - - // update the item - if (cached) { - // the item is in the items.sqlite3 database - log.vdebug("OneDrive change is an update to an existing local item"); - applyChangedItem(oldItem, oldPath, item, path); - } else { - log.vdebug("OneDrive change is potentially a new local item"); - // Check if file should be skipped based on size limit - if (isItemFile(driveItem)) { - if (cfg.getValueLong("skip_size") != 0) { - if (driveItem["size"].integer >= this.newSizeLimit) { - log.vlog("Skipping item - excluded by skip_size config: ", item.name, " (", driveItem["size"].integer/2^^20, " MB)"); - return; - } - } - } - // apply this new item - applyNewItem(item, path); + + // Are there any items to download post fetching and processing the /delta data? 
+ if (!fileJSONItemsToDownload.empty) { + // There are elements to download + log.vlog("Number of items to download from OneDrive: ", fileJSONItemsToDownload.length); + downloadOneDriveItems(); + // Cleanup array memory + fileJSONItemsToDownload = []; } - - if ((malwareDetected == false) && (downloadFailed == false)){ - // save the item in the db - // if the file was detected as malware and NOT downloaded, we dont want to falsify the DB as downloading it as otherwise the next pass will think it was deleted, thus delete the remote item - // Likewise if the download failed, we dont want to falsify the DB as downloading it as otherwise the next pass will think it was deleted, thus delete the remote item - if (cached) { - // the item is in the items.sqlite3 database - // Do we need to update the database with the details that were provided by the OneDrive API? - // Is the last modified timestamp in the DB the same as the API data? - SysTime localModifiedTime = oldItem.mtime; - localModifiedTime.fracSecs = Duration.zero; - SysTime remoteModifiedTime = item.mtime; - remoteModifiedTime.fracSecs = Duration.zero; - - // If the timestamp is different, or we are running on a National Cloud Deployment that does not support /delta queries - we have to update the DB with the details from OneDrive - // Unfortunatly because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes - // This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using Nataional Cloud Deployments - // as all records are touched / updated when performing the OneDrive sync operations. 
The only way to change this, is for Microsoft to support /delta queries for Nataional Cloud Deployments - if ((localModifiedTime != remoteModifiedTime) || (nationalCloudDeployment)) { - // Database update needed for this item because our local record is out-of-date - log.vdebug("Updating local database with item details from OneDrive as local record needs to be updated"); - itemdb.update(item); - } - } else { - // item is not in the items.sqlite3 database - log.vdebug("Inserting new item details to local database"); - itemdb.insert(item); - } - // What was the item that was saved - log.vdebug("item details: ", item); - } else { - // flag was tripped, which was it - if (downloadFailed) { - log.vdebug("Download or creation of local directory failed"); - } - if (malwareDetected) { - log.vdebug("OneDrive reported that file contained malware"); - } + + // Are there any skipped items still? + if (!skippedItems.empty) { + // Cleanup array memory + skippedItems.clear(); } } - - // download an item that was not synced before - private void applyNewItem(const ref Item item, const(string) path) - { - // Test for the local path existence - if (exists(path)) { + + // If the JSON item is not in the database, it is potentially a new item that we need to action + void applyPotentiallyNewLocalItem(Item newDatabaseItem, JSONValue onedriveJSONItem, string newItemPath) { + + // The JSON and Database items being passed in here have passed the following checks: + // - skip_file + // - skip_dir + // - sync_list + // - skip_dotfiles + // - check_nosync + // - skip_size + // - Is not currently cached in the local database + // As such, we should not be doing any other checks here to determine if the JSON item is wanted .. 
it is + + if (exists(newItemPath)) { // Issue #2209 fix - test if path is a bad symbolic link - if (isSymlink(path)) { + if (isSymlink(newItemPath)) { log.vdebug("Path on local disk is a symbolic link ........"); - if (!exists(readLink(path))) { + if (!exists(readLink(newItemPath))) { // reading the symbolic link failed log.vdebug("Reading the symbolic link target failed ........ "); - log.logAndNotify("Skipping item - invalid symbolic link: ", path); + log.logAndNotify("Skipping item - invalid symbolic link: ", newItemPath); return; } } - - // path exists locally, is not a bad symbolic link - // Query DB for new remote item in specified path + + // Path exists locally, is not a bad symbolic link + // Test if this item is actually in-sync + // What is the source of this item data? string itemSource = "remote"; - if (isItemSynced(item, path, itemSource)) { - // file details from OneDrive and local file details in database are in-sync - log.vdebug("The item to sync is already present on the local file system and is in-sync with the local database"); + if (isItemSynced(newDatabaseItem, newItemPath, itemSource)) { + // Item details from OneDrive and local item details in database are in-sync + log.vdebug("The item to sync is already present on the local filesystem and is in-sync with what is reported online"); + log.vdebug("Update/Insert local database with item details"); + log.vdebug("item details to update/insert: ", newDatabaseItem); + itemDB.upsert(newDatabaseItem); return; } else { - // file is not in sync with the database - // is the local file technically 'newer' based on UTC timestamp? 
- SysTime localModifiedTime = timeLastModified(path).toUTC(); - SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing + // Item details from OneDrive and local item details in database are NOT in-sync + log.vdebug("The item to sync exists locally but is NOT in the local database - otherwise this would be handled as changed item"); + + // Which object is newer? The local file or the remote file? + SysTime localModifiedTime = timeLastModified(newItemPath).toUTC(); + SysTime itemModifiedTime = newDatabaseItem.mtime; + // Reduce time resolution to seconds before comparing localModifiedTime.fracSecs = Duration.zero; itemModifiedTime.fracSecs = Duration.zero; - // is the local modified time greater than that from OneDrive? + // If we need to rename the file, what do we rename it to? + auto ext = extension(newItemPath); + auto renamedNewItemPath = newItemPath.chomp(ext) ~ "-" ~ deviceName ~ ext; + + // Is the local modified time greater than that from OneDrive? if (localModifiedTime > itemModifiedTime) { - // local file is newer than item on OneDrive based on file modified time + // Local file is newer than item on OneDrive based on file modified time // Is this item id in the database? - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ + if (itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.id)) { // item id is in the database // no local rename // no download needed log.vlog("Local item modified time is newer based on UTC time conversion - keeping local item as this exists in the local database"); log.vdebug("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer than OneDrive item and present in the sqlite database"); - return; } else { // item id is not in the database .. maybe a --resync ? - // Should this 'download' be skipped? - // Do we need to check for .nosync? 
Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - // need the parent path for this object - string parentPath = dirName(path); - if (exists(parentPath ~ "/.nosync")) { - log.vlog("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: ", path); - // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive - downloadFailed = true; - // clean up this partial file, otherwise every sync we will get theis warning - log.vlog("Removing previous partial file download due to .nosync found in parent folder & --check-for-nosync is enabled"); - safeRemove(path); - return; - } - } // file exists locally but is not in the sqlite database - maybe a failed download? log.vlog("Local item does not exist in local database - replacing with file from OneDrive - failed download?"); + // In a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file + // To this pint we have passed the following checks: + // 1. Any client side filtering checks - this determined this is a file that is wanted + // 2. A file with the exact name exists locally + // 3. The local modified time > remote modified time + // 4. The id of the item from OneDrive is not in the database - // in a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file - // we have passed the following checks: - // 1. file exists locally - // 2. local modified time > remote modified time - // 3. id is not in the database - - auto ext = extension(path); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - // has the user configured to IGNORE local data protection rules? + // Has the user configured to IGNORE local data protection rules? 
if (bypassDataPreservation) { // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", path); + log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", newItemPath); } else { // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ", path, " -> ", newPath); + log.log("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ", newItemPath, " -> ", renamedNewItemPath); // perform the rename action of the local file if (!dryRun) { - safeRename(path); + // Perform the local rename of the existing local file + safeRename(newItemPath, renamedNewItemPath, dryRun); } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist + // Expectation here is that there is a new file locally (renamedNewItemPath) however as we don't create this, the "new file" will not be uploaded as it does not exist log.vdebug("DRY-RUN: Skipping local file rename"); } } - } } else { - // remote file is newer than local item + // Remote file is newer than the existing local item log.vlog("Remote item modified time is newer based on UTC time conversion"); // correct message, remote item is newer log.vdebug("localModifiedTime (local file): ", localModifiedTime); log.vdebug("itemModifiedTime (OneDrive item): ", itemModifiedTime); - auto ext = extension(path); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - - // has the user configured to IGNORE local data protection rules? + // Has the user configured to IGNORE local data protection rules? 
if (bypassDataPreservation) { // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", path); + log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", newItemPath); } else { // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath); + log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", newItemPath, " -> ", renamedNewItemPath); // perform the rename action of the local file if (!dryRun) { - safeRename(path); + // Perform the local rename of the existing local file + safeRename(newItemPath, renamedNewItemPath, dryRun); } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist + // Expectation here is that there is a new file locally (renamedNewItemPath) however as we don't create this, the "new file" will not be uploaded as it does not exist log.vdebug("DRY-RUN: Skipping local file rename"); } } } } - } else { - // Path does not exist locally - this will be a new file download or folder creation - - // Should this 'download' be skipped due to 'skip_dir' directive - if (cfg.getValueString("skip_dir") != "") { - string pathToCheck; - // does the path start with '/'? 
- if (!startsWith(path, "/")){ - // path does not start with '/', but we need to check skip_dir entries with and without '/' - // so always make sure we are checking a path with '/' - // If this is a file, we need to check the parent path - if (item.type == ItemType.file) { - // use parent path and add '/' - pathToCheck = '/' ~ dirName(path); - } else { - // use path and add '/' - pathToCheck = '/' ~ path; - } - } - - // perform the check - if (selectiveSync.isDirNameExcluded(pathToCheck)) { - // this path should be skipped - if (item.type == ItemType.file) { - log.vlog("Skipping item - file path is excluded by skip_dir config: ", path); - } else { - log.vlog("Skipping item - excluded by skip_dir config: ", path); - } - // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive - downloadFailed = true; - return; - } - } - - // Should this 'download' be skipped due to nosync directive? - // Do we need to check for .nosync? Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - // need the parent path for this object - string parentPath = dirName(path); - if (exists(parentPath ~ "/.nosync")) { - log.vlog("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: ", path); - // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive - downloadFailed = true; - return; - } - } - } - - // how to handle this item? - final switch (item.type) { - case ItemType.file: - downloadFileItem(item, path); - if (dryRun) { - // we dont download the file, but we need to track that we 'faked it' - idsFaked ~= [item.driveId, item.id]; - } - break; - case ItemType.dir: - case ItemType.remote: - log.log("Creating local directory: ", path); - - // Issue #658 handling - is sync_list in use? 
- if (syncListConfigured) { - // sync_list configured and in use - // path to create was previously checked if this should be included / excluded. No need to check again. - log.vdebug("Issue #658 handling"); - setOneDriveFullScanTrigger(); - } - - // Issue #865 handling - is skip_dir in use? - if (cfg.getValueString("skip_dir") != "") { - // we have some entries in skip_dir - // path to create was previously checked if this should be included / excluded. No need to check again. - log.vdebug("Issue #865 handling"); - setOneDriveFullScanTrigger(); - } + } - if (!dryRun) { - try { - // Does the path exist locally? - if (!exists(path)) { + // Path does not exist locally (should not exist locally if renamed file) - this will be a new file download or new folder creation + // How to handle this Potentially New Local Item JSON ? + final switch (newDatabaseItem.type) { + case ItemType.file: + // Add to the items to download array for processing + fileJSONItemsToDownload ~= onedriveJSONItem; + break; + case ItemType.dir: + case ItemType.remote: + log.log("Creating local directory: ", newItemPath); + if (!dryRun) { + try { // Create the new directory - log.vdebug("Requested path does not exist, creating directory structure: ", path); - mkdirRecurse(path); + log.vdebug("Requested path does not exist, creating directory structure: ", newItemPath); + mkdirRecurse(newItemPath); // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", path); - path.setAttributes(cfg.returnRequiredDirectoryPermisions()); + log.vdebug("Setting directory permissions for: ", newItemPath); + newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); // Update the time of the folder to match the last modified time as is provided by OneDrive // If there are any files then downloaded into this folder, the last modified time will get // updated by the local Operating System with the latest timestamp - as this is normal operation // as the 
directory has been modified - log.vdebug("Setting directory lastModifiedDateTime for: ", path , " to ", item.mtime); - setTimes(path, item.mtime, item.mtime); + log.vdebug("Setting directory lastModifiedDateTime for: ", newItemPath , " to ", newDatabaseItem.mtime); + log.vdebug("Calling setTimes() for this file: ", newItemPath); + setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime); + // Save the item to the database + saveItem(onedriveJSONItem); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - // flag that this failed - downloadFailed = true; - return; + } else { + // we dont create the directory, but we need to track that we 'faked it' + idsFaked ~= [newDatabaseItem.driveId, newDatabaseItem.id]; + // Save the item to the dry-run database + saveItem(onedriveJSONItem); } - } else { - // we dont create the directory, but we need to track that we 'faked it' - idsFaked ~= [item.driveId, item.id]; - } - break; + break; + case ItemType.unknown: + // Unknown type - we dont action or sync these items + break; } } - - // update a local item - // the local item is assumed to be in sync with the local db - private void applyChangedItem(Item oldItem, string oldPath, Item newItem, string newPath) - { - assert(oldItem.driveId == newItem.driveId); - assert(oldItem.id == newItem.id); - assert(oldItem.type == newItem.type); - assert(oldItem.remoteDriveId == newItem.remoteDriveId); - assert(oldItem.remoteId == newItem.remoteId); - - if (oldItem.eTag != newItem.eTag) { - // handle changed name/path - if (oldPath != newPath) { - log.log("Moving ", oldPath, " to ", newPath); - if (exists(newPath)) { - Item localNewItem; - if (itemdb.selectByPath(newPath, defaultDriveId, localNewItem)) { - // Query DB for new local item in specified path + + // If the JSON item IS in the 
database, this will be an update to an existing in-sync item + void applyPotentiallyChangedItem(Item existingDatabaseItem, string existingItemPath, Item changedOneDriveItem, string changedItemPath, JSONValue onedriveJSONItem) { + + // If we are moving the item, we do not need to download it again + bool itemWasMoved = false; + + // Do we need to actually update the database with the details that were provided by the OneDrive API? + // Calculate these time items from the provided items + SysTime existingItemModifiedTime = existingDatabaseItem.mtime; + existingItemModifiedTime.fracSecs = Duration.zero; + SysTime changedOneDriveItemModifiedTime = changedOneDriveItem.mtime; + changedOneDriveItemModifiedTime.fracSecs = Duration.zero; + + if (existingDatabaseItem.eTag != changedOneDriveItem.eTag) { + // The eTag has changed to what we previously cached + if (existingItemPath != changedItemPath) { + // Log that we are changing / moving an item to a new name + log.log("Moving ", existingItemPath, " to ", changedItemPath); + // Is the destination path empty .. or does something exist at that location? + if (exists(changedItemPath)) { + // Destination we are moving to exists ... 
+ Item changedLocalItem; + // Query DB for this changed item in specified path that exists and see if it is in-sync + if (itemDB.selectByPath(changedItemPath, changedOneDriveItem.driveId, changedLocalItem)) { + // The 'changedItemPath' is in the database string itemSource = "database"; - if (isItemSynced(localNewItem, newPath, itemSource)) { + if (isItemSynced(changedLocalItem, changedItemPath, itemSource)) { + // The destination item is in-sync log.vlog("Destination is in sync and will be overwritten"); } else { - // TODO: force remote sync by deleting local item - log.vlog("The destination is occupied, renaming the conflicting file..."); - if (!dryRun) { - safeRename(newPath); - } + // The destination item is different + log.vlog("The destination is occupied with a different item, renaming the conflicting file..."); + // Backup this item, passing in if we are performing a --dry-run or not + safeBackup(changedItemPath, dryRun); } } else { - // to be overwritten item is not already in the itemdb, so it should - // be synced. Do a safe rename here, too. 
- // TODO: force remote sync by deleting local item - log.vlog("The destination is occupied by new file, renaming the conflicting file..."); - if (!dryRun) { - safeRename(newPath); - } + // The to be overwritten item is not already in the itemdb, so it should saved to avoid data loss + log.vlog("The destination is occupied by an existing un-synced file, renaming the conflicting file..."); + // Backup this item, passing in if we are performing a --dry-run or not + safeBackup(changedItemPath, dryRun); } } - // try and rename path, catch exception + + // Try and rename path, catch any exception generated try { - log.vdebug("Calling rename(oldPath, newPath)"); - if (!dryRun) { - // rename physical path on disk - rename(oldPath, newPath); - } else { - // track this as a faked id item - idsFaked ~= [newItem.driveId, newItem.id]; - // we also need to track that we did not rename this path - pathsRenamed ~= [oldPath]; + // Rename this item, passing in if we are performing a --dry-run or not + safeRename(existingItemPath, changedItemPath, dryRun); + + // If the item is a file, make sure that the local timestamp now is the same as the timestamp online + // Otherwise when we do the DB check, the move on the file system, the file technically has a newer timestamp + // which is 'correct' .. 
but we need to report locally the online timestamp here as the move was made online + if (changedOneDriveItem.type == ItemType.file) { + setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime); + } + + // Flag that the item was moved | renamed + itemWasMoved = true; + + // If we are in a --dry-run situation, the actual rename did not occur - but we need to track like it did + if (dryRun) { + // Track this as a faked id item + idsFaked ~= [changedOneDriveItem.driveId, changedOneDriveItem.id]; + // We also need to track that we did not rename this path + pathsRenamed ~= [existingItemPath]; } } catch (FileException e) { // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } - // handle changed content and mtime - // HACK: use mtime+hash instead of cTag because of https://github.com/OneDrive/onedrive-api-docs/issues/765 - if (newItem.type == ItemType.file && oldItem.mtime != newItem.mtime && !testFileHash(newPath, newItem)) { - downloadFileItem(newItem, newPath); - } - // handle changed time - if (newItem.type == ItemType.file && oldItem.mtime != newItem.mtime) { - try { - log.vdebug("Calling setTimes() for this file: ", newPath); - setTimes(newPath, newItem.mtime, newItem.mtime); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + // What sort of changed item is this? + // Is it a file, and we did not move it .. 
+		if ((changedOneDriveItem.type == ItemType.file) && (!itemWasMoved)) {
+			// The eTag is notorious for being 'changed' online by some backend Microsoft process
+			if (existingDatabaseItem.quickXorHash != changedOneDriveItem.quickXorHash) {
+				// Add to the items to download array for processing - the file hash we previously recorded is not the same as online
+				fileJSONItemsToDownload ~= onedriveJSONItem;
+			} else {
+				// If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive
+				// Unfortunately because of the consequence of National Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes
+				// This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes
+				// as all records are touched / updated when performing the OneDrive sync operations. The impacted operational modes are:
+				// - National Cloud Deployments do not support /delta as a query
+				// - When using --single-directory
+				// - When using --download-only --cleanup-local-files
+				
+				// Is the last modified timestamp in the DB the same as the API data or are we running an operational mode where we simulated the /delta response?
+				if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) {
+					// Save this item in the database
+					// Add to the local database
+					log.vdebug("Adding changed OneDrive Item to database: ", changedOneDriveItem);
+					itemDB.upsert(changedOneDriveItem);
+				}
+			}
+		} else {
+			// Save this item in the database
+			saveItem(onedriveJSONItem);
+			
+			// If the 'Add shortcut to My files' link was the item that was actually renamed .. 
we have to update our DB records + if (changedOneDriveItem.type == ItemType.remote) { + // Select remote item data from the database + Item existingRemoteDbItem; + itemDB.selectById(changedOneDriveItem.remoteDriveId, changedOneDriveItem.remoteId, existingRemoteDbItem); + // Update the 'name' in existingRemoteDbItem and save it back to the database + // This is the local name stored on disk that was just 'moved' + existingRemoteDbItem.name = changedOneDriveItem.name; + itemDB.upsert(existingRemoteDbItem); } } - } - } - - // downloads a File resource - private void downloadFileItem(const ref Item item, const(string) path) - { - static import std.exception; - assert(item.type == ItemType.file); - write("Downloading file ", path, " ... "); - JSONValue fileDetails; + } else { + // The existingDatabaseItem.eTag == changedOneDriveItem.eTag .. nothing has changed eTag wise + + // If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive + // Unfortunatly because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes + // This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes + // as all records are touched / updated when performing the OneDrive sync operations. 
The impacted operational modes are: + // - National Cloud Deployments do not support /delta as a query + // - When using --single-directory + // - When using --download-only --cleanup-local-files - try { - fileDetails = onedrive.getFileDetails(item.driveId, item.id); - } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for file details failed"); - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - downloadFailed = true; - return; + // Is the last modified timestamp in the DB the same as the API data or are we running an operational mode where we simulated the /delta response? + if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) { + // Database update needed for this item because our local record is out-of-date + // Add to the local database + log.vdebug("Adding changed OneDrive Item to database: ", changedOneDriveItem); + itemDB.upsert(changedOneDriveItem); } } + } + + // Download new file items as identified + void downloadOneDriveItems() { + // Lets deal with all the JSON items that need to be downloaded in a batch process + ulong batchSize = appConfig.concurrentThreads; + ulong batchCount = (fileJSONItemsToDownload.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; - // fileDetails has to be a valid JSON object - if (fileDetails.type() == JSONType.object){ - if (isMalware(fileDetails)){ - // OneDrive reports that this file is malware - log.error("ERROR: MALWARE DETECTED IN FILE - DOWNLOAD SKIPPED"); - // set global flag - malwareDetected = true; - return; - } - } else { - // Issue #550 handling - log.error("ERROR: Query of OneDrive for file details failed"); - log.vdebug("onedrive.getFileDetails call returned an invalid JSON Object"); - // We want to return, cant download - downloadFailed = true; - return; + foreach (chunk; fileJSONItemsToDownload.chunks(batchSize)) { + // send an 
 array containing 'appConfig.concurrentThreads' (16) JSON items to download
+			downloadOneDriveItemsInParallel(chunk);
+		}
+	}
+	
+	// Download items in parallel
+	void downloadOneDriveItemsInParallel(JSONValue[] array) {
+		// This function received an array of 16 JSON items to download
+		foreach (i, onedriveJSONItem; taskPool.parallel(array)) {
+			// Take each JSON item and 
+			downloadFileItem(onedriveJSONItem);
+		}
+	}
+	
+	// Perform the actual download of an object from OneDrive
+	void downloadFileItem(JSONValue onedriveJSONItem) {
+		
+		bool downloadFailed = false;
+		string OneDriveFileXORHash;
+		string OneDriveFileSHA256Hash;
+		ulong jsonFileSize = 0;
-	
-		if (!dryRun) {
-			ulong onlineFileSize = 0;
-			string OneDriveFileHash;
-			
-			// fileDetails should be a valid JSON due to prior check
-			if (hasFileSize(fileDetails)) {
-				// Use the configured onlineFileSize as reported by OneDrive
-				onlineFileSize = fileDetails["size"].integer;
+		// Download item specifics
+		string downloadDriveId = onedriveJSONItem["parentReference"]["driveId"].str;
+		string downloadParentId = onedriveJSONItem["parentReference"]["id"].str;
+		string downloadItemName = onedriveJSONItem["name"].str;
+		string downloadItemId = onedriveJSONItem["id"].str;
+		
+		// Calculate this item's path
+		string newItemPath = computeItemPath(downloadDriveId, downloadParentId) ~ "/" ~ downloadItemName;
+		log.vdebug("New Item calculated full path is: ", newItemPath);
+		
+		// Is the item reported as Malware ? 
+ if (isMalware(onedriveJSONItem)){ + // OneDrive reports that this file is malware + log.error("ERROR: MALWARE DETECTED IN FILE - DOWNLOAD SKIPPED: ", newItemPath); + downloadFailed = true; + } else { + // Grab this file's filesize + if (hasFileSize(onedriveJSONItem)) { + // Use the configured filesize as reported by OneDrive + jsonFileSize = onedriveJSONItem["size"].integer; } else { // filesize missing - log.vdebug("WARNING: fileDetails['size'] is missing"); + log.vdebug("WARNING: onedriveJSONItem['size'] is missing"); } - - if (hasHashes(fileDetails)) { + + // Configure the hashes for comparison post download + if (hasHashes(onedriveJSONItem)) { // File details returned hash details // QuickXorHash - if (hasQuickXorHash(fileDetails)) { - // Use the configured quickXorHash as reported by OneDrive - if (fileDetails["file"]["hashes"]["quickXorHash"].str != "") { - OneDriveFileHash = fileDetails["file"]["hashes"]["quickXorHash"].str; + if (hasQuickXorHash(onedriveJSONItem)) { + // Use the provided quickXorHash as reported by OneDrive + if (onedriveJSONItem["file"]["hashes"]["quickXorHash"].str != "") { + OneDriveFileXORHash = onedriveJSONItem["file"]["hashes"]["quickXorHash"].str; } } else { - // Check for sha256Hash as quickXorHash did not exist - if (hasSHA256Hash(fileDetails)) { - // Use the configured sha256Hash as reported by OneDrive - if (fileDetails["file"]["hashes"]["sha256Hash"].str != "") { - OneDriveFileHash = fileDetails["file"]["hashes"]["sha256Hash"].str; + // Fallback: Check for SHA256Hash + if (hasSHA256Hash(onedriveJSONItem)) { + // Use the provided sha256Hash as reported by OneDrive + if (onedriveJSONItem["file"]["hashes"]["sha256Hash"].str != "") { + OneDriveFileSHA256Hash = onedriveJSONItem["file"]["hashes"]["sha256Hash"].str; } } } } else { // file hash data missing - log.vdebug("WARNING: fileDetails['file']['hashes'] is missing - unable to compare file hash after download"); + log.vdebug("WARNING: onedriveJSONItem['file']['hashes'] is missing 
- unable to compare file hash after download"); } - + // Is there enough free space locally to download the file // - We can use '.' here as we change the current working directory to the configured 'sync_dir' ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); // So that we are not responsible in making the disk 100% full if we can download the file, compare the current available space against the reservation set and file size // The reservation value is user configurable in the config file, 50MB by default - ulong freeSpaceReservation = cfg.getValueLong("space_reservation"); + ulong freeSpaceReservation = appConfig.getValueLong("space_reservation"); // debug output log.vdebug("Local Disk Space Actual: ", localActualFreeSpace); log.vdebug("Free Space Reservation: ", freeSpaceReservation); - log.vdebug("File Size to Download: ", onlineFileSize); + log.vdebug("File Size to Download: ", jsonFileSize); - // calculate if we can download file - if ((localActualFreeSpace < freeSpaceReservation) || (onlineFileSize > localActualFreeSpace)) { + // Calculate if we can actually download file - is there enough free space? + if ((localActualFreeSpace < freeSpaceReservation) || (jsonFileSize > localActualFreeSpace)) { // localActualFreeSpace is less than freeSpaceReservation .. insufficient free space - // onlineFileSize is greater than localActualFreeSpace .. insufficient free space - writeln("failed!"); + // jsonFileSize is greater than localActualFreeSpace .. insufficient free space + log.log("Downloading file ", newItemPath, " ... 
failed!"); log.log("Insufficient local disk space to download file"); downloadFailed = true; - return; - } - - // Attempt to download the file - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - // 408 = Request Time Out - // 429 = Too Many Requests - need to delay - if (e.httpStatusCode == 408) { - // 408 error handling - request time out - // https://github.com/abraunegg/onedrive/issues/694 - // Back off & retry with incremental delay - int retryCount = 10; - int retryAttempts = 1; - int backoffInterval = 2; - while (retryAttempts < retryCount){ - // retry in 2,4,8,16,32,64,128,256,512,1024 seconds - Thread.sleep(dur!"seconds"(retryAttempts*backoffInterval)); - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - // successful download - retryAttempts = retryCount; - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 408)) { - // If another 408 .. - if (e.httpStatusCode == 408) { - // Increment & loop around - log.vdebug("HTTP 408 generated - incrementing retryAttempts"); - retryAttempts++; - } - // If a 429 .. 
- if (e.httpStatusCode == 429) { - // Increment & loop around - handleOneDriveThrottleRequest(); - log.vdebug("HTTP 429 generated - incrementing retryAttempts"); - retryAttempts++; - } - } else { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } else { + // If we are in a --dry-run situation - if not, actually perform the download + if (!dryRun) { + // Attempt to download the file as there is enough free space locally + OneDriveApi downloadFileOneDriveApiInstance; + downloadFileOneDriveApiInstance = new OneDriveApi(appConfig); + try { + downloadFileOneDriveApiInstance.initialise(); + downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); + downloadFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(downloadFileOneDriveApiInstance); + } catch (OneDriveException exception) { + log.vdebug("downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); generated a OneDriveException"); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(downloadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to download an item from OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + downloadFileItem(onedriveJSONItem); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } + + } catch (FileException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + downloadFailed = true; + } catch (ErrnoException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + downloadFailed = true; } - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests) - // 
https://github.com/abraunegg/onedrive/issues/133 - int retryCount = 10; - int retryAttempts = 1; - while (retryAttempts < retryCount){ - // retry after waiting the timeout value from the 429 HTTP response header Retry-After - handleOneDriveThrottleRequest(); - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - // successful download - retryAttempts = retryCount; - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 408)) { - // If another 408 .. - if (e.httpStatusCode == 408) { - // Increment & loop around - log.vdebug("HTTP 408 generated - incrementing retryAttempts"); - retryAttempts++; - } - // If a 429 .. - if (e.httpStatusCode == 429) { - // Increment & loop around - handleOneDriveThrottleRequest(); - log.vdebug("HTTP 429 generated - incrementing retryAttempts"); - retryAttempts++; + + // If we get to this point, something was downloaded .. does it match what we expected? + if (exists(newItemPath)) { + // When downloading some files from SharePoint, the OneDrive API reports one file size, + // but the SharePoint HTTP Server sends a totally different byte count for the same file + // we have implemented --disable-download-validation to disable these checks + + if (!disableDownloadValidation) { + // A 'file' was downloaded - does what we downloaded = reported jsonFileSize or if there is some sort of funky local disk compression going on + // Does the file hash OneDrive reports match what we have locally? 
+ string onlineFileHash; + string downloadedFileHash; + ulong downloadFileSize = getSize(newItemPath); + + if (!OneDriveFileXORHash.empty) { + onlineFileHash = OneDriveFileXORHash; + // Calculate the QuickXOHash for this file + downloadedFileHash = computeQuickXorHash(newItemPath); + } else { + onlineFileHash = OneDriveFileSHA256Hash; + // Fallback: Calculate the SHA256 Hash for this file + downloadedFileHash = computeSHA256Hash(newItemPath); + } + + if ((downloadFileSize == jsonFileSize) && (downloadedFileHash == onlineFileHash)) { + // Downloaded file matches size and hash + log.vdebug("Downloaded file matches reported size and reported file hash"); + try { + // get the mtime from the JSON data + SysTime itemModifiedTime; + if (isItemRemote(onedriveJSONItem)) { + // remote file item + itemModifiedTime = SysTime.fromISOExtString(onedriveJSONItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str); + } else { + // not a remote item + itemModifiedTime = SysTime.fromISOExtString(onedriveJSONItem["fileSystemInfo"]["lastModifiedDateTime"].str); + } + + // set the correct time on the downloaded file + log.vdebug("Calling setTimes() for this file: ", newItemPath); + setTimes(newItemPath, itemModifiedTime, itemModifiedTime); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } else { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // Downloaded file does not match size or hash .. which is it? + bool downloadValueMismatch = false; + // Size error? + if (downloadFileSize != jsonFileSize) { + // downloaded file size does not match + downloadValueMismatch = true; + log.vdebug("Actual file size on disk: ", downloadFileSize); + log.vdebug("OneDrive API reported size: ", jsonFileSize); + log.error("ERROR: File download size mis-match. 
Increase logging verbosity to determine why."); + } + // Hash Error + if (downloadedFileHash != onlineFileHash) { + // downloaded file hash does not match + downloadValueMismatch = true; + log.vdebug("Actual local file hash: ", downloadedFileHash); + log.vdebug("OneDrive API reported hash: ", onlineFileHash); + log.error("ERROR: File download hash mis-match. Increase logging verbosity to determine why."); + } + // .heic data loss check + // - https://github.com/abraunegg/onedrive/issues/2471 + // - https://github.com/OneDrive/onedrive-api-docs/issues/1532 + // - https://github.com/OneDrive/onedrive-api-docs/issues/1723 + if (downloadValueMismatch && (toLower(extension(newItemPath)) == ".heic")) { + // Need to display a message to the user that they have experienced data loss + log.error("DATA-LOSS: File downloaded has experienced data loss due to a Microsoft OneDrive API bug. DO NOT DELETE THIS FILE ONLINE."); + log.vlog(" Please read https://github.com/OneDrive/onedrive-api-docs/issues/1723 for more details."); + } + + // Add some workaround messaging for SharePoint + if (appConfig.accountType == "documentLibrary"){ + // It has been seen where SharePoint / OneDrive API reports one size via the JSON + // but the content length and file size written to disk is totally different - example: + // From JSON: "size": 17133 + // From HTTPS Server: < Content-Length: 19340 + // with no logical reason for the difference, except for a 302 redirect before file download + log.error("INFO: It is most likely that a SharePoint OneDrive API issue is the root cause. 
Add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); + } else { + // other account types + log.error("INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); + } + // We do not want this local file to remain on the local file system as it failed the integrity checks + log.log("Removing file ", newItemPath, " due to failed integrity checks"); + if (!dryRun) { + safeRemove(newItemPath); + } + downloadFailed = true; } - } + } else { + // Download validation checks were disabled + log.vdebug("Downloaded file validation disabled due to --disable-download-validation"); + log.vlog("WARNING: Skipping download integrity check for: ", newItemPath); + } // end of (!disableDownloadValidation) + } else { + log.error("ERROR: File failed to download. Increase logging verbosity to determine why."); + downloadFailed = true; } } - } catch (FileException e) { - // There was a file system error - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - downloadFailed = true; - return; - } catch (std.exception.ErrnoException e) { - // There was a file system error - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - downloadFailed = true; - return; } - // file has to have downloaded in order to set the times / data for the file - if (exists(path)) { - // When downloading some files from SharePoint, the OneDrive API reports one file size, but the SharePoint HTTP Server sends a totally different byte count - // for the same file - // we have implemented --disable-download-validation to disable these checks + + // File should have been downloaded + if (!downloadFailed) { + // Download did not fail + log.log("Downloading file ", newItemPath, " ... 
done"); + // Save this item into the database + saveItem(onedriveJSONItem); - if (!disableDownloadValidation) { - // A 'file' was downloaded - does what we downloaded = reported onlineFileSize or if there is some sort of funky local disk compression going on - // does the file hash OneDrive reports match what we have locally? - string quickXorHash = computeQuickXorHash(path); - // Compute the local file size - ulong localFileSize = getSize(path); - - if ((localFileSize == onlineFileSize) || (OneDriveFileHash == quickXorHash)) { - // downloaded matches either size or hash - log.vdebug("Downloaded file matches reported size and or reported file hash"); - try { - log.vdebug("Calling setTimes() for this file: ", path); - setTimes(path, item.mtime, item.mtime); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } else { - // size error? - if (localFileSize != onlineFileSize) { - // downloaded file size does not match - log.vdebug("Actual file size on disk: ", localFileSize); - log.vdebug("OneDrive API reported size: ", onlineFileSize); - log.error("ERROR: File download size mis-match. Increase logging verbosity to determine why."); - } - // hash error? - if (OneDriveFileHash != quickXorHash) { - // downloaded file hash does not match - log.vdebug("Actual local file hash: ", quickXorHash); - log.vdebug("OneDrive API reported hash: ", OneDriveFileHash); - log.error("ERROR: File download hash mis-match. 
Increase logging verbosity to determine why."); - } - // add some workaround messaging - if (accountType == "documentLibrary"){ - // It has been seen where SharePoint / OneDrive API reports one size via the JSON - // but the content length and file size written to disk is totally different - example: - // From JSON: "size": 17133 - // From HTTPS Server: < Content-Length: 19340 - // with no logical reason for the difference, except for a 302 redirect before file download - log.error("INFO: It is most likely that a SharePoint OneDrive API issue is the root cause. Add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); - } else { - // other account types - log.error("INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); - } - - // we do not want this local file to remain on the local file system - safeRemove(path); - downloadFailed = true; - return; - } - } else { - // download checks have been disabled - log.vdebug("Downloaded file validation disabled due to --disable-download-validation "); + /** + log.vdebug("Inserting new item details to local database"); + // What was the item that was saved + log.vdebug("item details: ", newDatabaseItem); + itemDB.upsert(newDatabaseItem); + **/ + + + // If we are in a --dry-run situation - if we are, we need to track that we faked the download + if (dryRun) { + // track that we 'faked it' + idsFaked ~= [downloadDriveId, downloadItemId]; } } else { - log.error("ERROR: File failed to download. Increase logging verbosity to determine why."); - downloadFailed = true; - return; + // Output download failed + log.log("Downloading file ", newItemPath, " ... failed!"); + // Add the path to a list of items that failed to download + fileDownloadFailures ~= newItemPath; } } - - if (!downloadFailed) { - writeln("done."); - log.fileOnly("Downloading file ", path, " ... 
done."); - } else { - writeln("failed!"); - log.fileOnly("Downloading file ", path, " ... failed!"); - } } - - // returns true if the given item corresponds to the local one - private bool isItemSynced(const ref Item item, const(string) path, string itemSource) - { + + // Test if the given item is in-sync. Returns true if the given item corresponds to the local one + bool isItemSynced(Item item, string path, string itemSource) { + if (!exists(path)) return false; final switch (item.type) { case ItemType.file: @@ -3232,20 +2183,41 @@ final class SyncEngine // local file is readable SysTime localModifiedTime = timeLastModified(path).toUTC(); SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing + // Reduce time resolution to seconds before comparing localModifiedTime.fracSecs = Duration.zero; itemModifiedTime.fracSecs = Duration.zero; if (localModifiedTime == itemModifiedTime) { return true; } else { - log.vlog("The local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime); + log.vlog("Local item time discrepancy detected: ", path); + log.vlog("This local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime); // The file has been modified ... is the hash the same? // Test the file hash as the date / time stamp is different - // Generating a hash is computationally expensive - only generate the hash if timestamp was modified + // Generating a hash is computationally expensive - we only generate the hash if timestamp was different if (testFileHash(path, item)) { + // The hash is the same .. 
so we need to fix-up the timestamp depending on where it is wrong + log.vlog("Local item has the same hash value as the item online - correcting timestamp"); + // Test if the local timestamp is newer + if (localModifiedTime > itemModifiedTime) { + // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different + log.vlog("The source of the incorrect timestamp was OneDrive online - correcting timestamp online"); + if (!dryRun) { + // Attempt to update the online date time stamp + uploadLastModifiedTime(item.driveId, item.id, localModifiedTime.toUTC(), item.eTag); + } + } else { + // The source of the out-of-date timestamp was the local file and this needs to be corrected to avoid always generating a hash test if timestamp is different + log.vlog("The source of the incorrect timestamp was the local file - correcting timestamp locally"); + if (!dryRun) { + log.vdebug("Calling setTimes() for this file: ", path); + setTimes(path, item.mtime, item.mtime); + } + } return true; } else { + // The hash is different so the content of the file has to be different as to what is stored online log.vlog("The local item has a different hash when compared to ", itemSource, " item hash"); + return false; } } } else { @@ -3265,1101 +2237,757 @@ final class SyncEngine log.vlog("The local item is a file but should be a directory"); } break; + case ItemType.unknown: + // Unknown type - return true but we dont action or sync these items + return true; } return false; } - - private void deleteItems() - { - foreach_reverse (i; idsToDelete) { - Item item; - string path; - if (!itemdb.selectById(i[0], i[1], item)) continue; // check if the item is in the db - // Compute this item path - path = computeItemPath(i[0], i[1]); - // Try to delete item object - log.log("Trying to delete item ", path); - if (!dryRun) { - // Actually process the database entry removal - itemdb.deleteById(item.driveId, item.id); - 
if (item.remoteDriveId != null) { - // delete the linked remote folder - itemdb.deleteById(item.remoteDriveId, item.remoteId); + + // Get the /delta data using the provided details + JSONValue getDeltaChangesByItemId(string selectedDriveId, string selectedItemId, string providedDeltaLink, OneDriveApi getDeltaQueryOneDriveApiInstance) { + + // Function variables + JSONValue deltaChangesBundle; + + // Get the /delta data for this account | driveId | deltaLink combination + log.vdebug("------------------------------------------------------------------"); + log.vdebug("selectedDriveId: ", selectedDriveId); + log.vdebug("selectedItemId: ", selectedItemId); + log.vdebug("providedDeltaLink: ", providedDeltaLink); + log.vdebug("------------------------------------------------------------------"); + + try { + deltaChangesBundle = getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink); + } catch (OneDriveException exception) { + // caught an exception + log.vdebug("getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink) generated a OneDriveException"); + + auto errorArray = splitLines(exception.msg); + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(getDeltaQueryOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + log.log(errorArray[0], " when attempting to query OneDrive API for Delta Changes - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); } - } - bool needsRemoval = false; - if (exists(path)) { - // path exists on the local system - // make sure that the path refers to the correct item - Item pathItem; - if (itemdb.selectByPath(path, item.driveId, pathItem)) { - if (pathItem.id == item.id) { - needsRemoval = true; - } else { - log.log("Skipped due to id difference!"); - } + // dont retry request, loop back to calling function + log.vdebug("Looping back after failure"); + deltaChangesBundle = null; + } else { + // Default operation if not 408,429,503,504 errors + if (exception.httpStatusCode == 410) { + log.log("\nWARNING: The OneDrive API responded with an error that indicates the locally stored deltaLink value is invalid"); + // Essentially the 'providedDeltaLink' that we have stored is no longer available ... 
re-try without the stored deltaLink + log.log("WARNING: Retrying OneDrive API call without using the locally stored deltaLink value"); + // Configure an empty deltaLink + log.vdebug("Delta link expired for 'getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink)', setting 'deltaLink = null'"); + string emptyDeltaLink = ""; + // retry with empty deltaLink + deltaChangesBundle = getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, emptyDeltaLink); } else { - // item has disappeared completely - needsRemoval = true; - } - } - if (needsRemoval) { - log.log("Deleting item ", path); - if (!dryRun) { - if (isFile(path)) { - remove(path); - } else { - try { - // Remove any children of this path if they still exist - // Resolve 'Directory not empty' error when deleting local files - foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { - attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); - } - // Remove the path now that it is empty of children - rmdirRecurse(path); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } + // display what the error is + log.log("CODING TO DO: Hitting this failure error output"); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + deltaChangesBundle = null; } } } - if (!dryRun) { - // clean up idsToDelete - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); - } + return deltaChangesBundle; } - // scan the given directory for differences and new items - for use with --synchronize - void scanForDifferences(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? 
- string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); - } else { - // use what was passed in - logPath = path; - } - - // If we are using --upload-only & --sync-shared-folders there is a possability that a 'new' local folder might - // be misinterpreted that it needs to be uploaded to the users default OneDrive DriveID rather than the requested / configured - // Shared Business Folder. In --resync scenarios, the DB information that tells that this Business Shared Folder does not exist, - // and in a --upload-only scenario will never exist, so the correct lookups are unable to be performed. - if ((exists(cfg.businessSharedFolderFilePath)) && (syncBusinessFolders) && (cfg.getValueBool("upload_only"))){ - // business_shared_folders file exists, --sync-shared-folders is enabled, --upload-only is enabled - log.vdebug("OneDrive Business --upload-only & --sync-shared-folders edge case triggered"); - handleUploadOnlyBusinessSharedFoldersEdgeCase(); - } + // Common code to handle a 408 or 429 response from the OneDrive API + void handleOneDriveThrottleRequest(OneDriveApi activeOneDriveApiInstance) { - // Are we configured to use a National Cloud Deployment - if (nationalCloudDeployment) { - // Select items that have a out-of-sync flag set - flagNationalCloudDeploymentOutOfSyncItems(); - } + // If OneDrive sends a status code 429 then this function will be used to process the Retry-After response header which contains the value by which we need to wait + log.vdebug("Handling a OneDrive HTTP 429 Response Code (Too Many Requests)"); + // Read in the Retry-After HTTP header as set and delay as per this value before retrying the request + auto retryAfterValue = activeOneDriveApiInstance.getRetryAfterValue(); + log.vdebug("Using Retry-After Value = ", retryAfterValue); - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. 
- // if a file, potentially leads to confusion as to what the client is actually doing - log.log("Uploading differences of ", logPath); - } + // HTTP request returned status code 429 (Too Many Requests) + // https://github.com/abraunegg/onedrive/issues/133 + // https://github.com/abraunegg/onedrive/issues/815 - Item item; - // For each unique OneDrive driveID we know about - foreach (driveId; driveIDsArray) { - log.vdebug("Processing DB entries for this driveId: ", driveId); - // Database scan of every item in DB for the given driveId based on the root parent for that drive - if ((syncBusinessFolders) && (driveId != defaultDriveId)) { - // There could be multiple shared folders all from this same driveId - are we doing a single directory sync? - if (cfg.getValueString("single_directory") != ""){ - // Limit the local filesystem check to just the requested directory - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } - } else { - // check everything associated with each driveId we know about - foreach(dbItem; itemdb.selectByDriveId(driveId)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(dbItem); - } - } - } else { - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } - } - } - - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. 
- // if a file, potentially leads to confusion as to what the client is actually doing - log.log("Uploading new items of ", logPath); + ulong delayBeforeRetry = 0; + if (retryAfterValue != 0) { + // Use the HTTP Response Header Value + delayBeforeRetry = retryAfterValue; + } else { + // Use a 120 second delay as a default given header value was zero + // This value is based on log files and data when determining correct process for 429 response handling + delayBeforeRetry = 120; + // Update that we are over-riding the provided value with a default + log.vdebug("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: ", delayBeforeRetry); } - // Filesystem walk to find new files not uploaded - uploadNewItems(path); - // clean up idsToDelete only if --dry-run is set - if (dryRun) { - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); - } + // Sleep thread as per request + log.log("Thread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); + log.log("Sleeping for ", delayBeforeRetry, " seconds"); + Thread.sleep(dur!"seconds"(delayBeforeRetry)); + + // Reset retry-after value to zero as we have used this value now and it may be changed in the future to a different value + activeOneDriveApiInstance.resetRetryAfterValue(); } - // scan the given directory for differences only - for use with --monitor - void scanForDifferencesDatabaseScan(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? 
- string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); + // If the JSON response is not correct JSON object, exit + void invalidJSONResponseFromOneDriveAPI() { + log.error("ERROR: Query of the OneDrive API returned an invalid JSON response"); + // Must exit + exit(-1); + } + + // Handle an unhandled API error + void defaultUnhandledHTTPErrorCode(OneDriveException exception) { + + // display error + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + // Must exit here + exit(-1); + } + + // Display the pertinant details of the sync engine + void displaySyncEngineDetails() { + + // Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes + //log.vlog("Application version: ", strip(import("version"))); + + string tempVersion = "v2.5.0-alpha-3" ~ " GitHub version: " ~ strip(import("version")); + log.vlog("Application version: ", tempVersion); + + log.vlog("Account Type: ", appConfig.accountType); + log.vlog("Default Drive ID: ", appConfig.defaultDriveId); + log.vlog("Default Root ID: ", appConfig.defaultRootId); + + // What do we display here for space remaining + if (appConfig.remainingFreeSpace > 0) { + // Display the actual value + log.vlog("Remaining Free Space: ", (appConfig.remainingFreeSpace/1024) , " KB"); } else { - // use what was passed in - logPath = path; + // zero or non-zero value or restricted + if (!appConfig.quotaRestricted){ + log.vlog("Remaining Free Space: 0 KB"); + } else { + log.vlog("Remaining Free Space: Not Available"); + } } + } + + // Query itemdb.computePath() and catch potential assert when DB consistency issue occurs + string computeItemPath(string thisDriveId, string thisItemId) { - // If we are using --upload-only & --sync-shared-folders there is a possability that a 'new' local folder might - // be misinterpreted that it needs to be uploaded to the users default OneDrive DriveID rather than the 
requested / configured - // Shared Business Folder. In --resync scenarios, the DB information that tells that this Business Shared Folder does not exist, - // and in a --upload-only scenario will never exist, so the correct lookups are unable to be performed. - if ((exists(cfg.businessSharedFolderFilePath)) && (syncBusinessFolders) && (cfg.getValueBool("upload_only"))){ - // business_shared_folders file exists, --sync-shared-folders is enabled, --upload-only is enabled - log.vdebug("OneDrive Business --upload-only & --sync-shared-folders edge case triggered"); - handleUploadOnlyBusinessSharedFoldersEdgeCase(); + // static declare this for this function + static import core.exception; + string calculatedPath; + log.vdebug("Attempting to calculate local filesystem path for ", thisDriveId, " and ", thisItemId); + try { + calculatedPath = itemDB.computePath(thisDriveId, thisItemId); + } catch (core.exception.AssertError) { + // broken tree in the database, we cant compute the path for this item id, exit + log.error("ERROR: A database consistency issue has been caught. A --resync is needed to rebuild the database."); + // Must exit here to preserve data + exit(-1); } - // Are we configured to use a National Cloud Deployment - if (nationalCloudDeployment) { - // Select items that have a out-of-sync flag set - flagNationalCloudDeploymentOutOfSyncItems(); - } + // return calculated path as string + return calculatedPath; + } + + // Try and compute the file hash for the given item + bool testFileHash(string path, Item item) { - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. 
- // if a file, potentially leads to confusion as to what the client is actually doing - log.vlog("Uploading differences of ", logPath); + // Generate QuickXORHash first before attempting to generate any other type of hash + if (item.quickXorHash) { + if (item.quickXorHash == computeQuickXorHash(path)) return true; + } else if (item.sha256Hash) { + if (item.sha256Hash == computeSHA256Hash(path)) return true; } - Item item; - // For each unique OneDrive driveID we know about - foreach (driveId; driveIDsArray) { - log.vdebug("Processing DB entries for this driveId: ", driveId); - // Database scan of every item in DB for the given driveId based on the root parent for that drive - if ((syncBusinessFolders) && (driveId != defaultDriveId)) { - // There could be multiple shared folders all from this same driveId - are we doing a single directory sync? - if (cfg.getValueString("single_directory") != ""){ - // Limit the local filesystem check to just the requested directory - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } + return false; + } + + // Process items that need to be removed + void processDeleteItems() { + + foreach_reverse (i; idsToDelete) { + Item item; + string path; + if (!itemDB.selectById(i[0], i[1], item)) continue; // check if the item is in the db + // Compute this item path + path = computeItemPath(i[0], i[1]); + + // Log the action if the path exists .. 
it may of already been removed and this is a legacy array item + if (exists(path)) { + if (item.type == ItemType.file) { + log.log("Trying to delete file ", path); } else { - // check everything associated with each driveId we know about - foreach(dbItem; itemdb.selectByDriveId(driveId)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(dbItem); - } - } - } else { - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); + log.log("Trying to delete directory ", path); } } - } - } - - void flagNationalCloudDeploymentOutOfSyncItems() { - // Any entry in the DB than is flagged as out-of-sync needs to be cleaned up locally first before we scan the entire DB - // Normally, this is done at the end of processing all /delta queries, however National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Select items that have a out-of-sync flag set - foreach (driveId; driveIDsArray) { - // For each unique OneDrive driveID we know about - Item[] outOfSyncItems = itemdb.selectOutOfSyncItems(driveId); - foreach (item; outOfSyncItems) { - if (!dryRun) { - // clean up idsToDelete - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); - // flag to delete local file as it now is no longer in sync with OneDrive - log.vdebug("Flagging to delete local item as it now is no longer in sync with OneDrive"); - log.vdebug("item: ", item); - idsToDelete ~= [item.driveId, item.id]; - // delete items in idsToDelete - if (idsToDelete.length > 0) deleteItems(); + + // Process the database entry removal. 
In a --dry-run scenario, this is being done against a DB copy + itemDB.deleteById(item.driveId, item.id); + if (item.remoteDriveId != null) { + // delete the linked remote folder + itemDB.deleteById(item.remoteDriveId, item.remoteId); + } + + // Add to pathFakeDeletedArray + // We dont want to try and upload this item again, so we need to track this object + if (dryRun) { + // We need to add './' here so that it can be correctly searched to ensure it is not uploaded + string pathToAdd = "./" ~ path; + pathFakeDeletedArray ~= pathToAdd; + } + + bool needsRemoval = false; + if (exists(path)) { + // path exists on the local system + // make sure that the path refers to the correct item + Item pathItem; + if (itemDB.selectByPath(path, item.driveId, pathItem)) { + if (pathItem.id == item.id) { + needsRemoval = true; + } else { + log.log("Skipped due to id difference!"); + } + } else { + // item has disappeared completely + needsRemoval = true; } } - } - } - - void handleUploadOnlyBusinessSharedFoldersEdgeCase() { - // read in the business_shared_folders file contents - string[] businessSharedFoldersList; - // open file as read only - auto file = File(cfg.businessSharedFolderFilePath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - businessSharedFoldersList ~= buildNormalizedPath(line); - } - file.close(); - - // Query the GET /me/drive/sharedWithMe API - JSONValue graphQuery = onedrive.getSharedWithMe(); - if (graphQuery.type() == JSONType.object) { - if (count(graphQuery["value"].array) != 0) { - // Shared items returned - log.vdebug("onedrive.getSharedWithMe API Response: ", graphQuery); - foreach (searchResult; graphQuery["value"].array) { - // loop variables - string sharedFolderName; - string remoteParentDriveId; - string remoteParentItemId; - Item remoteItemRoot; - Item remoteItem; - - // is the shared item with us a 'folder' ? 
- // we only handle folders, not files or other items - if (isItemFolder(searchResult)) { - // Debug response output - log.vdebug("shared folder entry: ", searchResult); - sharedFolderName = searchResult["name"].str; - remoteParentDriveId = searchResult["remoteItem"]["parentReference"]["driveId"].str; - remoteParentItemId = searchResult["remoteItem"]["parentReference"]["id"].str; - - if (canFind(businessSharedFoldersList, sharedFolderName)) { - // Shared Folder matches what is in the shared folder list - log.vdebug("shared folder name matches business_shared_folders list item: ", sharedFolderName); - // Actions: - // 1. Add this remote item to the DB so that it can be queried - // 2. Add remoteParentDriveId to driveIDsArray so we have a record of it - - // Make JSON item DB compatible - remoteItem = makeItem(searchResult); - // Fix up entries, as we are manipulating the data - remoteItem.driveId = remoteParentDriveId; - remoteItem.eTag = ""; - remoteItem.cTag = ""; - remoteItem.parentId = defaultRootId; - remoteItem.remoteDriveId = ""; - remoteItem.remoteId = ""; - - // Build the remote root DB item - remoteItemRoot.driveId = remoteParentDriveId; - remoteItemRoot.id = defaultRootId; - remoteItemRoot.name = "root"; - remoteItemRoot.type = ItemType.dir; - remoteItemRoot.mtime = remoteItem.mtime; - remoteItemRoot.syncStatus = "Y"; - - // Add root remote item to the local database - log.vdebug("Adding remote folder root to database: ", remoteItemRoot); - itemdb.upsert(remoteItemRoot); - - // Add shared folder item to the local database - log.vdebug("Adding remote folder to database: ", remoteItem); - itemdb.upsert(remoteItem); - - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, remoteParentDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= remoteParentDriveId; + if (needsRemoval) { + // Log the action + if (item.type == ItemType.file) { + log.log("Deleting file ", path); + } else { + log.log("Deleting 
directory ", path); + } + + // Perform the action + if (!dryRun) { + if (isFile(path)) { + remove(path); + } else { + try { + // Remove any children of this path if they still exist + // Resolve 'Directory not empty' error when deleting local files + foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { + attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); } + // Remove the path now that it is empty of children + rmdirRecurse(path); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } } } } + + if (!dryRun) { + // Cleanup array memory + idsToDelete = []; + } } - // scan the given directory for new items - for use with --monitor or --cleanup-local-files - void scanForDifferencesFilesystemScan(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? - string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); + // Update the timestamp of an object online + void uploadLastModifiedTime(string driveId, string id, SysTime mtime, string eTag) { + + string itemModifiedTime; + itemModifiedTime = mtime.toISOExtString(); + JSONValue data = [ + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": itemModifiedTime + ]) + ]; + + // What eTag value do we use? + string eTagValue; + if (appConfig.accountType == "personal") { + eTagValue = null; } else { - // use what was passed in - logPath = path; + eTagValue = eTag; } - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. 
- // if a file, potentially leads to confusion as to what the client is actually doing - if (!cleanupLocalFiles) { - // if --cleanup-local-files was set, we will not be uploading data - log.vlog("Uploading new items of ", logPath); + JSONValue response; + // Create a new OneDrive API instance + OneDriveApi uploadLastModifiedTimeApiInstance; + uploadLastModifiedTimeApiInstance = new OneDriveApi(appConfig); + uploadLastModifiedTimeApiInstance.initialise(); + + // Try and update the online last modified time + try { + // Use this instance + response = uploadLastModifiedTimeApiInstance.updateById(driveId, id, data, eTagValue); + // Shut the instance down + uploadLastModifiedTimeApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadLastModifiedTimeApiInstance); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(response); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadLastModifiedTimeApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to update the timestamp on an item on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + uploadLastModifiedTime(driveId, id, mtime, eTag); + return; + } else { + // Default operation if not 408,429,503,504 errors + if (exception.httpStatusCode == 409) { + // ETag does not match current item's value - use a null eTag + log.vdebug("Retrying Function: ", thisFunctionName); + uploadLastModifiedTime(driveId, id, mtime, null); + } else { + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } } } - - // Filesystem walk to find extra files that reside locally. 
- // If --cleanup-local-files is not used, these will be uploaded (normal operation) - // If --download-only --cleanup-local-files is being used, extra files found locally will be deleted from the local filesystem - uploadNewItems(path); } - private void uploadDifferences(const ref Item item) - { - // see if this item.id we were supposed to have deleted - // match early and return - if (dryRun) { - foreach (i; idsToDelete) { - if (i[1] == item.id) { - return; - } - } - } - - bool unwanted = false; - string path; + // Perform a database integrity check - checking all the items that are in-sync at the moment, validating what we know should be on disk, to what is actually on disk + void performDatabaseConsistencyAndIntegrityCheck() { - // Compute this item path early as we we use this path often - path = computeItemPath(item.driveId, item.id); + // Log what we are doing + if (!appConfig.surpressLoggingOutput) { + log.log("Performing a database consistency and integrity check on locally stored data ... "); + } - // item.id was in the database associated with the item.driveId specified - log.vlog("Processing ", buildNormalizedPath(path)); + // What driveIDsArray do we use? If we are doing a --single-directory we need to use just the drive id associated with that operation + string[] consistencyCheckDriveIdsArray; + if (singleDirectoryScope) { + consistencyCheckDriveIdsArray ~= singleDirectoryScopeDriveId; + } else { + consistencyCheckDriveIdsArray = driveIDsArray; + } - // What type of DB item are we processing - // Is this item excluded by user configuration of skip_dir or skip_file? - // Is this item a directory or 'remote' type? A 'remote' type is a folder DB tie so should be compared as directory for exclusion - if ((item.type == ItemType.dir)||(item.type == ItemType.remote)) { - // Do we need to check for .nosync? 
Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - if (exists(path ~ "/.nosync")) { - log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", path); - return; + // Create a new DB blank item + Item item; + // Use the array we populate, rather than selecting all distinct driveId's from the database + foreach (driveId; consistencyCheckDriveIdsArray) { + // Make the logging more accurate - we cant update driveId as this then breaks the below queries + log.vlog("Processing DB entries for this Drive ID: ", driveId); + + // What OneDrive API query do we use? + // - Are we running against a National Cloud Deployments that does not support /delta ? + // National Cloud Deployments do not support /delta as a query + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // + // - Are we performing a --single-directory sync, which will exclude many items online, focusing in on a specific online directory + // + // - Are we performing a --download-only --cleanup-local-files action? 
+ // + // If we did, we self generated a /delta response, thus need to now process elements that are still flagged as out-of-sync + if ((singleDirectoryScope) || (nationalCloudDeployment) || (cleanupLocalFiles)) { + // Any entry in the DB than is flagged as out-of-sync needs to be cleaned up locally first before we scan the entire DB + // Normally, this is done at the end of processing all /delta queries, however when using --single-directory or a National Cloud Deployments is configured + // We cant use /delta to query the OneDrive API as National Cloud Deployments dont support /delta + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // We dont use /delta for --single-directory as, in order to sync a single path with /delta, we need to query the entire OneDrive API JSON data to then filter out + // objects that we dont want, thus, it is easier to use the same method as National Cloud Deployments, but query just the objects we are after + + // For each unique OneDrive driveID we know about + Item[] outOfSyncItems = itemDB.selectOutOfSyncItems(driveId); + foreach (outOfSyncItem; outOfSyncItems) { + if (!dryRun) { + // clean up idsToDelete + idsToDelete.length = 0; + assumeSafeAppend(idsToDelete); + // flag to delete local file as it now is no longer in sync with OneDrive + log.vdebug("Flagging to delete local item as it now is no longer in sync with OneDrive"); + log.vdebug("outOfSyncItem: ", outOfSyncItem); + idsToDelete ~= [outOfSyncItem.driveId, outOfSyncItem.id]; + // delete items in idsToDelete + if (idsToDelete.length > 0) processDeleteItems(); + } + } + + // Fetch database items associated with this path + Item[] driveItems; + if (singleDirectoryScope) { + // Use the --single-directory items we previously configured + // - query database for children objects using those items + driveItems = getChildren(singleDirectoryScopeDriveId, singleDirectoryScopeItemId); + } else { + // Check everything associated with each driveId we know 
about + log.vdebug("Selecting DB items via itemDB.selectByDriveId(driveId)"); + // Query database + driveItems = itemDB.selectByDriveId(driveId); + } + + log.vdebug("Database items to process for this driveId: ", driveItems.count); + // Process each database database item associated with the driveId + foreach(dbItem; driveItems) { + // Does it still exist on disk in the location the DB thinks it is + checkDatabaseItemForConsistency(dbItem); + } + } else { + // Check everything associated with each driveId we know about + log.vdebug("Selecting DB items via itemDB.selectByDriveId(driveId)"); + // Query database + auto driveItems = itemDB.selectByDriveId(driveId); + log.vdebug("Database items to process for this driveId: ", driveItems.count); + // Process each database database item associated with the driveId + foreach(dbItem; driveItems) { + // Does it still exist on disk in the location the DB thinks it is + checkDatabaseItemForConsistency(dbItem); } } - // Is the path excluded? - unwanted = selectiveSync.isDirNameExcluded(item.name); } - // Is this item a file? - if (item.type == ItemType.file) { - // Is the filename excluded? - unwanted = selectiveSync.isFileNameExcluded(item.name); - } - - // If path or filename does not exclude, is this excluded due to use of selective sync? - if (!unwanted) { - // is sync_list configured - if (syncListConfigured) { - // sync_list configured and in use - // Is the path excluded via sync_list? - unwanted = selectiveSync.isPathExcludedViaSyncList(path); + // Are we doing a --download-only sync? + if (!appConfig.getValueBool("download_only")) { + // Do we have any known items, where the content has changed locally, that needs to be uploaded? 
+ if (!databaseItemsWhereContentHasChanged.empty) { + // There are changed local files that were in the DB to upload + log.log("Changed local items to upload to OneDrive: ", databaseItemsWhereContentHasChanged.length); + processChangedLocalItemsToUpload(); + // Cleanup array memory + databaseItemsWhereContentHasChanged = []; } } - - // skip unwanted items - if (unwanted) { - //log.vlog("Filtered out"); - return; - } + } + + // Check this Database Item for its consistency on disk + void checkDatabaseItemForConsistency(Item dbItem) { + + // What is the local path item + string localFilePath; + // Do we want to onward process this item? + bool unwanted = false; - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(path)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", path); - return; - } + // Compute this dbItem path early as we we use this path often + localFilePath = buildNormalizedPath(computeItemPath(dbItem.driveId, dbItem.id)); - // Check for bad whitespace items - if (!containsBadWhiteSpace(path)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", path); - return; + // To improve logging output for this function, what is the 'logical path'? 
+ string logOutputPath; + if (localFilePath == ".") { + // get the configured sync_dir + logOutputPath = buildNormalizedPath(appConfig.getValueString("sync_dir")); + } else { + // use what was computed + logOutputPath = localFilePath; } - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(path)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", path); - return; - } + // Log what we are doing + log.vlog("Processing ", logOutputPath); - final switch (item.type) { - case ItemType.dir: - uploadDirDifferences(item, path); - break; + // Determine which action to take + final switch (dbItem.type) { case ItemType.file: - uploadFileDifferences(item, path); + // Logging output + checkFileDatabaseItemForConsistency(dbItem, localFilePath); + break; + case ItemType.dir: + // Logging output + checkDirectoryDatabaseItemForConsistency(dbItem, localFilePath); break; case ItemType.remote: - uploadRemoteDirDifferences(item, path); + // checkRemoteDirectoryDatabaseItemForConsistency(dbItem, localFilePath); + break; + case ItemType.unknown: + // Unknown type - we dont action these items break; } } - - private void uploadDirDifferences(const ref Item item, const(string) path) - { - assert(item.type == ItemType.dir); - if (exists(path)) { - // Fix https://github.com/abraunegg/onedrive/issues/1915 - try { - if (!isDir(path)) { - log.vlog("The item was a directory but now it is a file"); - uploadDeleteItem(item, path); - uploadNewFile(path); - } else { - log.vlog("The directory has not changed"); - // loop through the children - foreach (Item child; itemdb.selectChildren(item.driveId, item.id)) { - uploadDifferences(child); - } - } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; - } - } else { - // Directory does not exist locally - // If we are in a --dry-run situation - this directory may never have existed as we never downloaded it - if 
(!dryRun) { - // Not --dry-run situation - if (!cfg.getValueBool("monitor")) { - // Not in --monitor mode - log.vlog("The directory has been deleted locally"); - } else { - // Appropriate message as we are in --monitor mode - log.vlog("The directory appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'"); - log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped"); - } - // A moved file will be uploaded as 'new', delete the old file and reference - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // we are in a --dry-run situation, directory appears to have deleted locally - this directory may never have existed as we never downloaded it .. - // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPath(path, defaultDriveId, databaseItem)) { - // Path not found in database - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // Path was found in the database - // Did we 'fake create it' as part of --dry-run ? 
- foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The directory has not changed"); - return; - } - } - // item.id did not match a 'faked' download new directory creation - log.vlog("The directory has been deleted locally"); - uploadDeleteItem(item, path); - } - } - } - } - - private void uploadRemoteDirDifferences(const ref Item item, const(string) path) - { - assert(item.type == ItemType.remote); - if (exists(path)) { - if (!isDir(path)) { - log.vlog("The item was a directory but now it is a file"); - uploadDeleteItem(item, path); - uploadNewFile(path); - } else { - log.vlog("The directory has not changed"); - // continue through the linked folder - assert(item.remoteDriveId && item.remoteId); - Item remoteItem; - bool found = itemdb.selectById(item.remoteDriveId, item.remoteId, remoteItem); - if(found){ - // item was found in the database - uploadDifferences(remoteItem); - } - } - } else { - // are we in a dry-run scenario - if (!dryRun) { - // no dry-run - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // we are in a --dry-run situation, directory appears to have deleted locally - this directory may never have existed as we never downloaded it .. 
- // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPathWithoutRemote(path, defaultDriveId, databaseItem)) { - // Path not found in database - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // Path was found in the database - // Did we 'fake create it' as part of --dry-run ? - foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The directory has not changed"); - return; - } - } - // item.id did not match a 'faked' download new directory creation - log.vlog("The directory has been deleted locally"); - uploadDeleteItem(item, path); - } - } - } - } - - // upload local file system differences to OneDrive - private void uploadFileDifferences(const ref Item item, const(string) path) - { - // Reset upload failure - OneDrive or filesystem issue (reading data) - uploadFailed = false; + + // Perform the database consistency check on this file item + void checkFileDatabaseItemForConsistency(Item dbItem, string localFilePath) { - // uploadFileDifferences is called when processing DB entries to compare against actual files on disk + // What is the source of this item data? string itemSource = "database"; - - assert(item.type == ItemType.file); - if (exists(path)) { - if (isFile(path)) { - // can we actually read the local file? - if (readLocalFile(path)){ - // file is readable - SysTime localModifiedTime = timeLastModified(path).toUTC(); - SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing + + // Does this item|file still exist on disk? + if (exists(localFilePath)) { + // Path exists locally, is this path a file? 
+ if (isFile(localFilePath)) { + // Can we actually read the local file? + if (readLocalFile(localFilePath)){ + // File is readable + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + SysTime itemModifiedTime = dbItem.mtime; + // Reduce time resolution to seconds before comparing itemModifiedTime.fracSecs = Duration.zero; localModifiedTime.fracSecs = Duration.zero; if (localModifiedTime != itemModifiedTime) { - log.vlog("The file last modified time has changed"); + // The modified dates are different log.vdebug("The local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime); - string eTag = item.eTag; - - // perform file hash tests - has the content of the file changed? - if (!testFileHash(path, item)) { - log.vlog("The file content has changed"); - log.vdebug("The local item has a different hash when compared to ", itemSource, " item hash"); - write("Uploading modified file ", path, " ... "); - JSONValue response; - - if (!dryRun) { - // Get the file size - long thisFileSize = getSize(path); - // Are we using OneDrive Personal or OneDrive Business? 
- // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUploadReplace(path, item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 404) { - // HTTP request returned status code 404 - the eTag provided does not exist - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 404 - eTag Issue' - gracefully handling error"); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - write("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Simple Upload Replace Failed - OneDrive eTag / cTag match issue (Personal Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload done without error - writeln("done."); - } else { - writeln(""); - try { - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current 
item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Personal Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload done without error - writeln("done."); - } + // Test the file hash + if (!testFileHash(localFilePath, dbItem)) { + // Is the local file 'newer' or 'older' (ie was an old file 'restored locally' by a different backup / replacement process?) + if (localModifiedTime >= itemModifiedTime) { + // Local file is newer + if (!appConfig.getValueBool("download_only")) { + log.vlog("The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive"); + // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check + databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; } else { - // OneDrive Business Account - // We need to always use a session to upload, but handle the changed file correctly - if (accountType == "business"){ - try { - // is this a zero-byte file? - if (thisFileSize == 0) { - // the file we are trying to upload as a session is a zero byte file - we cant use a session to upload or replace the file - // as OneDrive technically does not support zero byte files - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - log.vlog("Skip Reason: Microsoft OneDrive does not support 'zero-byte' files as a modified upload. Will upload as new file."); - // delete file on OneDrive - onedrive.deleteById(item.driveId, item.id, item.eTag); - // delete file from local database - itemdb.deleteById(item.driveId, item.id); - return; - } else { - if ((!syncBusinessFolders) || (item.driveId == defaultDriveId)) { - // For logging consistency - writeln(""); - // If we are not syncing Shared Business Folders, or this change is going to the 'users' default drive, handle normally - // Perform a normal session upload - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } else { - // If we are uploading to a shared business folder, there are a couple of corner cases here: - // 1. Shared Folder is a 'users' folder - // 2. Shared Folder is a 'SharePoint Library' folder, meaning we get hit by this stupidity: https://github.com/OneDrive/onedrive-api-docs/issues/935 - response = handleSharePointMetadataAdditionBug(item, path); - } - } - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - writeln("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Business Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // Did the upload fail? - if (!uploadFailed){ - // upload done without error or failure - writeln("done."); - // As the session.upload includes the last modified time, save the response - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - - // OneDrive documentLibrary - if (accountType == "documentLibrary"){ - // is this a zero-byte file? - if (thisFileSize == 0) { - // the file we are trying to upload as a session is a zero byte file - we cant use a session to upload or replace the file - // as OneDrive technically does not support zero byte files - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("Skip Reason: Microsoft OneDrive does not support 'zero-byte' files as a modified upload. 
Will upload as new file."); - // delete file on OneDrive - onedrive.deleteById(item.driveId, item.id, item.eTag); - // delete file from local database - itemdb.deleteById(item.driveId, item.id); - return; - } else { - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // This means, as a session upload, on 'completion' the file is 'moved' and generates a 404 ...... - response = handleSharePointMetadataAdditionBug(item, path); - - // Did the upload fail? - if (!uploadFailed){ - // upload done without error or failure - writeln("done."); - // As the session.upload includes the last modified time, save the response - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } + log.vlog("The file content has changed locally and has a newer timestamp. The file will remain different to online file due to --download-only being used"); } - - // Update etag with ctag from response - if ("cTag" in response) { - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded via simple upload - eTag = response["cTag"].str; + } else { + // Local file is older - data recovery process? something else? + if (!appConfig.getValueBool("download_only")) { + log.vlog("The file content has changed locally and file now has a older timestamp. Uploading this file to OneDrive may potentially cause data-loss online"); + // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check + databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; } else { - // Is there an eTag in the response? 
- if ("eTag" in response) { - // use the eTag from the response as there was no cTag - eTag = response["eTag"].str; - } else { - // no tag available - set to nothing - eTag = ""; - } + log.vlog("The file content has changed locally and file now has a older timestamp. The file will remain different to online file due to --download-only being used"); } - - // log that the modified file was uploaded successfully - log.fileOnly("Uploading modified file ", path, " ... done."); - - // update free space tracking if this is our drive id - if (item.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; } - } - if (accountType == "personal"){ - // If Personal, call to update the modified time as stored on OneDrive + } else { + // The file contents have not changed, but the modified timestamp has + log.vlog("The last modified timestamp has changed however the file content has not changed"); + log.vlog("The local item has the same hash value as the item online - correcting timestamp online"); if (!dryRun) { - uploadLastModifiedTime(item.driveId, item.id, eTag, localModifiedTime.toUTC()); + // Attempt to update the online date time stamp + uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); } } } else { + // The file has not changed log.vlog("The file has not changed"); } } else { //The file is not readable - skipped - log.log("Skipping processing this file as it cannot be read (file permissions or file corruption): ", path); - uploadFailed = true; + 
log.log("Skipping processing this file as it cannot be read (file permissions or file corruption): ", localFilePath); } } else { + // The item was a file but now is a directory log.vlog("The item was a file but now is a directory"); - uploadDeleteItem(item, path); - uploadCreateDir(path); } } else { - // File does not exist locally + // File does not exist locally, but is in our database as a dbItem containing all the data was passed into this function // If we are in a --dry-run situation - this file may never have existed as we never downloaded it if (!dryRun) { // Not --dry-run situation - if (!cfg.getValueBool("monitor")) { - log.vlog("The file has been deleted locally"); - } else { - // Appropriate message as we are in --monitor mode - log.vlog("The file appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'"); - log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped"); - } - // A moved file will be uploaded as 'new', delete the old file and reference - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } + log.vlog("The file has been deleted locally"); + // Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); } else { - // We are in a --dry-run situation, file appears to have deleted locally - this file may never have existed as we never downloaded it .. 
- // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPath(path, defaultDriveId, databaseItem)) { - // file not found in database - log.vlog("The file has been deleted locally"); - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // file was found in the database - // Did we 'fake create it' as part of --dry-run ? - foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked file which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The file has not changed"); - return; - } + // We are in a --dry-run situation, file appears to have been deleted locally - this file may never have existed locally as we never downloaded it due to --dry-run + // Did we 'fake create it' as part of --dry-run ? + bool idsFakedMatch = false; + foreach (i; idsFaked) { + if (i[1] == dbItem.id) { + log.vdebug("Matched faked file which is 'supposed' to exist but not created due to --dry-run use"); + log.vlog("The file has not changed"); + idsFakedMatch = true; } - // item.id did not match a 'faked' download new file creation + } + if (!idsFakedMatch) { + // dbItem.id did not match a 'faked' download new file creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation log.vlog("The file has been deleted locally"); - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } + // Upload to OneDrive the instruction to delete this item. 
This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); } } } } - private JSONValue handleSharePointMetadataAdditionBug(const ref Item item, const(string) path) - { - // Explicit function for handling https://github.com/OneDrive/onedrive-api-docs/issues/935 - JSONValue response; - // Handle certain file types differently - if ((extension(path) == ".txt") || (extension(path) == ".csv")) { - // .txt and .csv are unaffected by https://github.com/OneDrive/onedrive-api-docs/issues/935 - // For logging consistency - writeln(""); + // Perform the database consistency check on this directory item + void checkDirectoryDatabaseItemForConsistency(Item dbItem, string localFilePath) { + + // What is the source of this item data? + string itemSource = "database"; + + // Does this item|directory still exist on disk? + if (exists(localFilePath)) { + // Fix https://github.com/abraunegg/onedrive/issues/1915 try { - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - writeln("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return response; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Sharepoint Library)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return response; + if (!isDir(localFilePath)) { + log.vlog("The item was a directory but now it is a file"); + uploadDeletedItem(dbItem, localFilePath); + uploadNewFile(localFilePath); } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + // Directory still exists locally + log.vlog("The directory has not changed"); + // When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed + // Thus, if we traverse the path of this directory .. we end up with double processing & log output .. 
which is not ideal + if (!singleDirectoryScope) { + // loop through the children + foreach (Item child; itemDB.selectChildren(dbItem.driveId, dbItem.id)) { + checkDatabaseItemForConsistency(child); + } + } } } catch (FileException e) { // display the error message - writeln("skipped."); displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; } - // upload done without error - writeln("done."); } else { - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // This means, as a session upload, on 'completion' the file is 'moved' and generates a 404 ...... - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("Skip Reason: Microsoft Sharepoint 'enrichment' after upload issue"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - // Delete record from the local database - file will be uploaded as a new file - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return response; + // Directory does not exist locally, but it is in our database as a dbItem containing all the data was passed into this function + // If we are in a --dry-run situation - this directory may never have existed as we never created it + if (!dryRun) { + // Not --dry-run situation + if (!appConfig.getValueBool("monitor")) { + // Not in --monitor mode + log.vlog("The directory has been deleted locally"); + } else { + // Appropriate message as we are in --monitor mode + log.vlog("The directory appears to have been deleted locally .. but we are running in --monitor mode. 
This may have been 'moved' on the local filesystem rather than being 'deleted'"); + log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped"); + } + // A moved directory will be uploaded as 'new', delete the old directory and database reference + // Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); + } else { + // We are in a --dry-run situation, directory appears to have been deleted locally - this directory may never have existed locally as we never created it due to --dry-run + // Did we 'fake create it' as part of --dry-run ? + bool idsFakedMatch = false; + foreach (i; idsFaked) { + if (i[1] == dbItem.id) { + log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); + log.vlog("The directory has not changed"); + idsFakedMatch = true; + } + } + if (!idsFakedMatch) { + // dbItem.id did not match a 'faked' download new directory creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation + log.vlog("The directory has been deleted locally"); + // Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); + } else { + // When we are using --single-directory, we use the getChildren() call to get all children of a path, meaning all children are already traversed + // Thus, if we traverse the path of this directory .. we end up with double processing & log output .. 
which is not ideal + if (!singleDirectoryScope) { + // loop through the children + foreach (Item child; itemDB.selectChildren(dbItem.driveId, dbItem.id)) { + checkDatabaseItemForConsistency(child); + } + } + } + } } - - // return a JSON response so that it can be used and saved - return response; } - - // upload new items to OneDrive - private void uploadNewItems(const(string) path) - { - static import std.utf; - import std.range : walkLength; - import std.uni : byGrapheme; - // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders - // If the path is greater than allowed characters, then one drive will return a '400 - Bad Request' - // Need to ensure that the URI is encoded before the check is made: - // - 400 Character Limit for OneDrive Business / Office 365 - // - 430 Character Limit for OneDrive Personal - long maxPathLength = 0; - long pathWalkLength = 0; + + // Does this local path (directory or file) conform with the Microsoft Naming Restrictions? + bool checkPathAgainstMicrosoftNamingRestrictions(string localFilePath) { + + // Check if the given path violates certain Microsoft restrictions and limitations + // Return a true|false response + bool invalidPath = false; - // Configure maxPathLength based on account type - if (accountType == "personal"){ - // Personal Account - maxPathLength = 430; - } else { - // Business Account / Office365 - maxPathLength = 400; + // Check against Microsoft OneDrive restriction and limitations about Windows naming files + if (!invalidPath) { + if (!isValidName(localFilePath)) { + log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", localFilePath); + invalidPath = true; + } } - // A short lived file that has disappeared will cause an error - is the path valid? 
- if (!exists(path)) { - log.log("Skipping item - path has disappeared: ", path); - return; + // Check for bad whitespace items + if (!invalidPath) { + if (!containsBadWhiteSpace(localFilePath)) { + log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", localFilePath); + invalidPath = true; + } } - // Calculate the path length by walking the path, catch any UTF-8 character errors - // https://github.com/abraunegg/onedrive/issues/487 - // https://github.com/abraunegg/onedrive/issues/1192 - try { - pathWalkLength = path.byGrapheme.walkLength; - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.vlog("Skipping item - invalid UTF sequence: ", path); - log.vdebug(" Error Reason:", e.msg); - return; + // Check for HTML ASCII Codes as part of file name + if (!invalidPath) { + if (!containsASCIIHTMLCodes(localFilePath)) { + log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", localFilePath); + invalidPath = true; + } } + // Return if this is a valid path + return invalidPath; + } + + // Does this local path (directory or file) get excluded from any operation based on any client side filtering rules? 
+ bool checkPathAgainstClientSideFiltering(string localFilePath) { - // check the std.encoding of the path - // https://github.com/skilion/onedrive/issues/57 - // https://github.com/abraunegg/onedrive/issues/487 - if(!isValid(path)) { - // Path is not valid according to https://dlang.org/phobos/std_encoding.html - log.vlog("Skipping item - invalid character encoding sequence: ", path); - return; - } + // Check the path against client side filtering rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + // Return a true|false response - // Is the path length is less than maxPathLength - if(pathWalkLength < maxPathLength){ - // skip dot files if configured - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(path)) { - log.vlog("Skipping item - .file or .folder: ", path); - return; + bool clientSideRuleExcludesPath = false; + + // does the path exist? + if (!exists(localFilePath)) { + // path does not exist - we cant review any client side rules on something that does not exist locally + return clientSideRuleExcludesPath; + } + + // - check_nosync + if (!clientSideRuleExcludesPath) { + // Do we need to check for .nosync? Only if --check-for-nosync was passed in + if (appConfig.getValueBool("check_nosync")) { + if (exists(localFilePath ~ "/.nosync")) { + log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", localFilePath); + clientSideRuleExcludesPath = true; } } - - // Do we need to check for .nosync? 
Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - if (exists(path ~ "/.nosync")) { - log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", path); - return; + } + + // - skip_dotfiles + if (!clientSideRuleExcludesPath) { + // Do we need to check skip dot files if configured + if (appConfig.getValueBool("skip_dotfiles")) { + if (isDotFile(localFilePath)) { + log.vlog("Skipping item - .file or .folder: ", localFilePath); + clientSideRuleExcludesPath = true; } } - + } + + // - skip_symlinks + if (!clientSideRuleExcludesPath) { // Is the path a symbolic link - if (isSymlink(path)) { + if (isSymlink(localFilePath)) { // if config says so we skip all symlinked items - if (cfg.getValueBool("skip_symlinks")) { - log.vlog("Skipping item - skip symbolic links configured: ", path); - return; + if (appConfig.getValueBool("skip_symlinks")) { + log.vlog("Skipping item - skip symbolic links configured: ", localFilePath); + clientSideRuleExcludesPath = true; } // skip unexisting symbolic links - else if (!exists(readLink(path))) { + else if (!exists(readLink(localFilePath))) { // reading the symbolic link failed - is the link a relative symbolic link // drwxrwxr-x. 2 alex alex 46 May 30 09:16 . // drwxrwxr-x. 3 alex alex 35 May 30 09:14 .. 
@@ -4368,7 +2996,7 @@ final class SyncEngine // // absolute links will be able to be read, but 'relative' links will fail, because they cannot be read based on the current working directory 'sync_dir' string currentSyncDir = getcwd(); - string fullLinkPath = buildNormalizedPath(absolutePath(path)); + string fullLinkPath = buildNormalizedPath(absolutePath(localFilePath)); string fileName = baseName(fullLinkPath); string parentLinkPath = dirName(fullLinkPath); // test if this is a 'relative' symbolic link @@ -4379,2924 +3007,3980 @@ final class SyncEngine chdir(currentSyncDir); // results if (relativeLinkTest) { - log.vdebug("Not skipping item - symbolic link is a 'relative link' to target ('", relativeLink, "') which can be supported: ", path); + log.vdebug("Not skipping item - symbolic link is a 'relative link' to target ('", relativeLink, "') which can be supported: ", localFilePath); } else { - log.logAndNotify("Skipping item - invalid symbolic link: ", path); - return; + log.logAndNotify("Skipping item - invalid symbolic link: ", localFilePath); + clientSideRuleExcludesPath = true; } } } - - // Check for bad whitespace items - if (!containsBadWhiteSpace(path)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", path); - return; - } - - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(path)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", path); - return; - } - - // Is this item excluded by user configuration of skip_dir or skip_file? - if (path != ".") { - if (isDir(path)) { - log.vdebug("Checking local path: ", path); + } + + // Is this item excluded by user configuration of skip_dir or skip_file? 
+ if (!clientSideRuleExcludesPath) { + if (localFilePath != ".") { + // skip_dir handling + if (isDir(localFilePath)) { + log.vdebug("Checking local path: ", localFilePath); // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { + if (appConfig.getValueString("skip_dir") != "") { // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(path.strip('.'))) { - log.vlog("Skipping item - excluded by skip_dir config: ", path); - return; - } - } - - // In the event that this 'new item' is actually a OneDrive Business Shared Folder - // however the user may have omitted --sync-shared-folders, thus 'technically' this is a new item - // for this account OneDrive root, however this then would cause issues if --sync-shared-folders - // is added again after this sync - if ((exists(cfg.businessSharedFolderFilePath)) && (!syncBusinessFolders)){ - // business_shared_folders file exists, but we are not using / syncing them - // The file contents can only contain 'folder' names, so we need to strip './' from any path we are checking - if(selectiveSync.isSharedFolderMatched(strip(path,"./"))){ - // path detected as a 'new item' is matched as a path in business_shared_folders - log.vlog("Skipping item - excluded as included in business_shared_folders config: ", path); - log.vlog("To sync this directory to your OneDrive Account update your business_shared_folders config"); - return; + if (selectiveSync.isDirNameExcluded(localFilePath.strip('.'))) { + log.vlog("Skipping item - excluded by skip_dir config: ", localFilePath); + clientSideRuleExcludesPath = true; } } } - if (isFile(path)) { - log.vdebug("Checking file: ", path); + // skip_file handling + if (isFile(localFilePath)) { + log.vdebug("Checking file: ", localFilePath); // The path that needs to be checked needs to include the '/' // This due to if 
the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - if (selectiveSync.isFileNameExcluded(path.strip('.'))) { - log.vlog("Skipping item - excluded by skip_file config: ", path); - return; + if (selectiveSync.isFileNameExcluded(localFilePath.strip('.'))) { + log.vlog("Skipping item - excluded by skip_file config: ", localFilePath); + clientSideRuleExcludesPath = true; } } - - // is sync_list configured + } + } + + // Is this item excluded by user configuration of sync_list? + if (!clientSideRuleExcludesPath) { + if (localFilePath != ".") { if (syncListConfigured) { // sync_list configured and in use - if (selectiveSync.isPathExcludedViaSyncList(path)) { - if ((isFile(path)) && (cfg.getValueBool("sync_root_files")) && (rootName(path.strip('.').strip('/')) == "")) { - log.vdebug("Not skipping path due to sync_root_files inclusion: ", path); + if (selectiveSync.isPathExcludedViaSyncList(localFilePath)) { + if ((isFile(localFilePath)) && (appConfig.getValueBool("sync_root_files")) && (rootName(localFilePath.strip('.').strip('/')) == "")) { + log.vdebug("Not skipping path due to sync_root_files inclusion: ", localFilePath); } else { - string userSyncList = cfg.configDirName ~ "/sync_list"; - if (exists(userSyncList)){ + if (exists(appConfig.syncListFilePath)){ // skipped most likely due to inclusion in sync_list - log.vlog("Skipping item - excluded by sync_list config: ", path); - return; + log.vlog("Skipping item - excluded by sync_list config: ", localFilePath); + clientSideRuleExcludesPath = true; } else { // skipped for some other reason - log.vlog("Skipping item - path excluded by user config: ", path); - return; + log.vlog("Skipping item - path excluded by user config: ", localFilePath); + clientSideRuleExcludesPath = true; } } } } } - - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(path)) { - log.logAndNotify("Skipping item - invalid name (Microsoft 
Naming Convention): ", path); - return; - } - - // If we are in a --dry-run scenario, we may have renamed a folder - but it is technically not renamed locally - // Thus, that entire path may be attemtped to be uploaded as new data to OneDrive - if (dryRun) { - // check the pathsRenamed array for this path - // if any match - we need to exclude this path - foreach (thisRenamedPath; pathsRenamed) { - log.vdebug("Renamed Path to evaluate: ", thisRenamedPath); - // Can we find 'thisRenamedPath' in the given 'path' - if (canFind(path, thisRenamedPath)) { - log.vdebug("Renamed Path MATCH - DONT UPLOAD AS NEW"); - return; + } + + // Check if this is excluded by a user set maximum filesize to upload + if (!clientSideRuleExcludesPath) { + if (isFile(localFilePath)) { + if (fileSizeLimit != 0) { + // Get the file size + ulong thisFileSize = getSize(localFilePath); + if (thisFileSize >= fileSizeLimit) { + log.vlog("Skipping item - excluded by skip_size config: ", localFilePath, " (", thisFileSize/2^^20," MB)"); clientSideRuleExcludesPath = true; } } } + } + + return clientSideRuleExcludesPath; + } + + // Does this JSON item (as received from OneDrive API) get excluded from any operation based on any client side filtering rules? + // This function is only used when we are fetching objects from the OneDrive API using a /children query to help speed up what object we query + bool checkJSONAgainstClientSideFiltering(JSONValue onedriveJSONItem) { - // We want to upload this new local data - if (isDir(path)) { - Item item; - bool pathFoundInDB = false; - foreach (driveId; driveIDsArray) { - if (itemdb.selectByPath(path, driveId, item)) { - pathFoundInDB = true; - } - } - - // Was the path found in the database? 
- if (!pathFoundInDB) { - // Path not found in database when searching all drive id's - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - uploadCreateDir(path); + bool clientSideRuleExcludesPath = false; + + // Check the path against client side filtering rules + // - check_nosync (MISSING) + // - skip_dotfiles (MISSING) + // - skip_symlinks (MISSING) + // - skip_file + // - skip_dir + // - sync_list + // - skip_size (MISSING) + // Return a true|false response + + // Use the JSON elements rather can computing a DB struct via makeItem() + string thisItemId = onedriveJSONItem["id"].str; + string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str; + string thisItemName = onedriveJSONItem["name"].str; + + // Is this parent is in the database + bool parentInDatabase = false; + + // Calculate if the Parent Item is in the database so that it can be re-used + parentInDatabase = itemDB.idInLocalDatabase(thisItemDriveId, thisItemParentId); + + // Check if this is excluded by config option: skip_dir + if (!clientSideRuleExcludesPath) { + // Is the item a folder? 
+ if (isItemFolder(onedriveJSONItem)) { + // Only check path if config is != "" + if (!appConfig.getValueString("skip_dir").empty) { + // work out the 'snippet' path where this folder would be created + string simplePathToCheck = ""; + string complexPathToCheck = ""; + string matchDisplay = ""; + + if (hasParentReference(onedriveJSONItem)) { + // we need to workout the FULL path for this item + // simple path + if (("name" in onedriveJSONItem["parentReference"]) != null) { + simplePathToCheck = onedriveJSONItem["parentReference"]["name"].str ~ "/" ~ onedriveJSONItem["name"].str; + } else { + simplePathToCheck = onedriveJSONItem["name"].str; + } + log.vdebug("skip_dir path to check (simple): ", simplePathToCheck); + + // complex path + if (parentInDatabase) { + // build up complexPathToCheck + //complexPathToCheck = buildNormalizedPath(newItemPath); + complexPathToCheck = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + } else { + log.vdebug("Parent details not in database - unable to compute complex path to check"); + } + if (!complexPathToCheck.empty) { + log.vdebug("skip_dir path to check (complex): ", complexPathToCheck); + } } else { - // we need to clean up this directory - log.log("Removing local directory as --download-only & --cleanup-local-files configured"); - // Remove any children of this path if they still exist - // Resolve 'Directory not empty' error when deleting local files - try { - foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { - // what sort of child is this? - if (isDir(child.name)) { - log.log("Removing local directory: ", child.name); - } else { - log.log("Removing local file: ", child.name); - } - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... process local delete - try { - attrIsDir(child.linkAttributes) ? 
rmdir(child.name) : remove(child.name); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } - // Remove the path now that it is empty of children - log.log("Removing local directory: ", path); - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... process local delete - try { - rmdirRecurse(path); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; - } + simplePathToCheck = onedriveJSONItem["name"].str; } - } - - // recursively traverse children - // the above operation takes time and the directory might have - // disappeared in the meantime - if (!exists(path)) { - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - log.vlog("Directory disappeared during upload: ", path); + + // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder + // then isDirNameExcluded matching will not work + // Clean up 'root:' if present + if (startsWith(simplePathToCheck, "root:")){ + log.vdebug("Updating simplePathToCheck to remove 'root:'"); + simplePathToCheck = strip(simplePathToCheck, "root:"); } - return; - } - - // Try and access the directory and any path below - try { - auto entries = dirEntries(path, SpanMode.shallow, false); - foreach (DirEntry entry; entries) { - string thisPath = entry.name; - uploadNewItems(thisPath); + if (startsWith(complexPathToCheck, "root:")){ + log.vdebug("Updating complexPathToCheck to remove 'root:'"); + complexPathToCheck = strip(complexPathToCheck, "root:"); } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; - } - } else { - // path is not a directory, is it a valid file? 
- // pipes - whilst technically valid files, are not valid for this client - // prw-rw-r--. 1 user user 0 Jul 7 05:55 my_pipe - if (isFile(path)) { - // Path is a valid file - bool fileFoundInDB = false; - Item item; - // Search the database for this file - foreach (driveId; driveIDsArray) { - if (itemdb.selectByPath(path, driveId, item)) { - fileFoundInDB = true; + // OK .. what checks are we doing? + if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) { + // just a simple check + log.vdebug("Performing a simple check only"); + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); + } else { + // simple and complex + log.vdebug("Performing a simple then complex path match if required"); + // simple first + log.vdebug("Performing a simple check first"); + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); + matchDisplay = simplePathToCheck; + if (!clientSideRuleExcludesPath) { + log.vdebug("Simple match was false, attempting complex match"); + // simple didnt match, perform a complex check + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(complexPathToCheck); + matchDisplay = complexPathToCheck; } } - - // Was the file found in the database? - if (!fileFoundInDB) { - // File not found in database when searching all drive id's - // Do we upload the file or clean up the file? - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - uploadNewFile(path); - // Did the upload fail? - if (!uploadFailed) { - // Upload did not fail - // Issue #763 - Delete local files after sync handling - // are we in an --upload-only & --remove-source-files scenario? - if ((uploadOnly) && (localDeleteAfterUpload)) { - // Log that we are deleting a local item - log.log("Removing local file as --upload-only & --remove-source-files configured"); - // are we in a --dry-run scenario? - log.vdebug("Removing local file: ", path); - if (!dryRun) { - // No --dry-run ... 
process local file delete - safeRemove(path); - } - } - } - } else { - // we need to clean up this file - log.log("Removing local file as --download-only & --cleanup-local-files configured"); - // are we in a --dry-run scenario? - log.log("Removing local file: ", path); - if (!dryRun) { - // No --dry-run ... process local file delete - safeRemove(path); - } - } + // result + log.vdebug("skip_dir exclude result (directory based): ", clientSideRuleExcludesPath); + if (clientSideRuleExcludesPath) { + // This path should be skipped + log.vlog("Skipping item - excluded by skip_dir config: ", matchDisplay); } - } else { - // path is not a valid file - log.log("Skipping item - item is not a valid file: ", path); } } - } else { - // This path was skipped - why? - log.log("Skipping item '", path, "' due to the full path exceeding ", maxPathLength, " characters (Microsoft OneDrive limitation)"); } - } - - // create new directory on OneDrive - private void uploadCreateDir(const(string) path) - { - log.vlog("OneDrive Client requested to create remote path: ", path); - - JSONValue onedrivePathDetails; - Item parent; - - // Was the path entered the root path? - if (path != "."){ - // What parent path to use? - string parentPath = dirName(path); // will be either . 
or something else - if (parentPath == "."){ - // Assume this is a new 'local' folder in the users configured sync_dir - // Use client defaults - parent.id = defaultRootId; // Should give something like 12345ABCDE1234A1!101 - parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 - } else { - // Query the database using each of the driveId's we are using - foreach (driveId; driveIDsArray) { - // Query the database for this parent path using each driveId - Item dbResponse; - if(itemdb.selectByPathWithoutRemote(parentPath, driveId, dbResponse)){ - // parent path was found in the database - parent = dbResponse; - } - } - } - - // If this is still null or empty - we cant query the database properly later on - // Query OneDrive API for parent details - if ((parent.driveId == "") && (parent.id == "")){ - try { - log.vdebug("Attempting to query OneDrive for this parent path: ", parentPath); - onedrivePathDetails = onedrive.getPathDetails(parentPath); - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(parentPath); generated a OneDriveException"); - // exception - set onedriveParentRootDetails to a blank valid JSON - onedrivePathDetails = parseJSON("{}"); - if (e.httpStatusCode == 404) { - // Parent does not exist ... need to create parent - log.vdebug("Parent path does not exist: ", parentPath); - uploadCreateDir(parentPath); - } + + // Check if this is excluded by config option: skip_file + if (!clientSideRuleExcludesPath) { + // is the item a file ? + if (isFileItem(onedriveJSONItem)) { + // JSON item is a file + + // skip_file can contain 4 types of entries: + // - wildcard - *.txt + // - text + wildcard - name*.txt + // - full path + combination of any above two - /path/name*.txt + // - full path to file - /path/to/file.txt + + string exclusionTestPath = ""; + + // is the parent id in the database? 
+ if (parentInDatabase) { + // parent id is in the database, so we can try and calculate the full file path + string jsonItemPath = ""; - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadCreateDir(path);"); - uploadCreateDir(path); - // return back to original call - return; + // Compute this item path & need the full path for this file + jsonItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + // Log the calculation + log.vdebug("New Item calculated full path is: ", jsonItemPath); + + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched + // However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks + if (!startsWith(jsonItemPath, "/")){ + // Add '/' to the path + exclusionTestPath = '/' ~ jsonItemPath; } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // what are we checking + log.vdebug("skip_file item to check (full calculated path): ", exclusionTestPath); + } else { + // parent not in database, we can only check using this JSON item's name + if (!startsWith(thisItemName, "/")){ + // Add '/' to the path + exclusionTestPath = '/' ~ thisItemName; } + + // what are we checking + log.vdebug("skip_file item to check (file name only - parent path not in database): ", exclusionTestPath); + clientSideRuleExcludesPath 
= selectiveSync.isFileNameExcluded(exclusionTestPath); } - // configure the parent item data - if (hasId(onedrivePathDetails) && hasParentReference(onedrivePathDetails)){ - log.vdebug("Parent path found, configuring parent item"); - parent.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101 - parent.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 - } else { - // OneDrive API query failed - // Assume client defaults - log.vdebug("Parent path could not be queried, using OneDrive account defaults"); - parent.id = defaultRootId; // Should give something like 12345ABCDE1234A1!101 - parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 + // Perform the 'skip_file' evaluation + clientSideRuleExcludesPath = selectiveSync.isFileNameExcluded(exclusionTestPath); + log.vdebug("Result: ", clientSideRuleExcludesPath); + if (clientSideRuleExcludesPath) { + // This path should be skipped + log.vlog("Skipping item - excluded by skip_file config: ", exclusionTestPath); } } - - JSONValue response; - // test if the path we are going to create already exists on OneDrive - try { - log.vdebug("Attempting to query OneDrive for this path: ", path); - response = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } catch (OneDriveException e) { - log.vdebug("response = onedrive.getPathDetails(path); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found on the drive id we queried - log.vlog("The requested directory to create was not found on OneDrive - creating remote directory: ", path); - - if (!dryRun) { - // Perform the database lookup - is the parent in the database? 
- if (!itemdb.selectByPath(dirName(path), parent.driveId, parent)) { - // parent is not in the database - log.vdebug("Parent path is not in the database - need to add it: ", dirName(path)); - uploadCreateDir(dirName(path)); - } - - // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? - if (defaultDriveId == parent.driveId){ - // enforce check of parent path. if the above was triggered, the below will generate a sync retry and will now be sucessful - enforce(itemdb.selectByPath(dirName(path), parent.driveId, parent), "The parent item id is not in the database"); - } else { - log.vdebug("Parent drive ID is not our drive ID - parent most likely a shared folder"); - } - - JSONValue driveItem = [ - "name": JSONValue(baseName(path)), - "folder": parseJSON("{}") - ]; - - // Submit the creation request - // Fix for https://github.com/skilion/onedrive/issues/356 - try { - // Attempt to create a new folder on the configured parent driveId & parent id - response = onedrive.createById(parent.driveId, parent.id, driveItem); - } catch (OneDriveException e) { - if (e.httpStatusCode == 409) { - // OneDrive API returned a 404 (above) to say the directory did not exist - // but when we attempted to create it, OneDrive responded that it now already exists - log.vlog("OneDrive reported that ", path, " already exists .. 
OneDrive API race condition"); - return; - } else { - // some other error from OneDrive was returned - display what it is - log.error("OneDrive generated an error when creating this path: ", path); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); + } + + // Check if this is included or excluded by use of sync_list + if (!clientSideRuleExcludesPath) { + // No need to try and process something against a sync_list if it has been configured + if (syncListConfigured) { + // Compute the item path if empty - as to check sync_list we need an actual path to check + + // What is the path of the new item + string newItemPath; + + // Is the parent in the database? If not, we cannot compute the the full path based on the database entries + // In a --resync scenario - the database is empty + if (parentInDatabase) { + // Calculate this items path based on database entries + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + } else { + // parent not in the database + if (("path" in onedriveJSONItem["parentReference"]) != null) { + // If there is a parent reference path, try and use it + string selfBuiltPath = onedriveJSONItem["parentReference"]["path"].str ~ "/" ~ onedriveJSONItem["name"].str; + auto splitPath = selfBuiltPath.split("root:"); + newItemPath = splitPath[1]; } else { - // Simulate a successful 'directory create' & save it to the dryRun database copy - // The simulated response has to pass 'makeItem' as part of saveItem - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); + // no parent reference path available + newItemPath = thisItemName; } - - log.vlog("Successfully created the remote directory ", path, " on OneDrive"); - return; } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). 
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadCreateDir(path);"); - uploadCreateDir(path); - // return back to original call - return; + // Update newItemPath + if(newItemPath[0] == '/') { + newItemPath = newItemPath[1..$]; } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file - // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, - // even though some file systems (such as a POSIX-compliant file system) may consider them as different. - // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. + // What path are we checking? 
+ log.vdebug("sync_list item to check: ", newItemPath); - if (response["name"].str == baseName(path)){ - // OneDrive 'name' matches local path name - log.vlog("The requested directory to create was found on OneDrive - skipping creating the directory: ", path ); - // Check that this path is in the database - if (!itemdb.selectById(parent.driveId, parent.id, parent)){ - // parent for 'path' is NOT in the database - log.vlog("The parent for this path is not in the local database - need to add parent to local database"); - parentPath = dirName(path); - // add the parent into the database - uploadCreateDir(parentPath); - // save this child item into the database - log.vlog("The parent for this path has been added to the local database - adding requested path (", path ,") to database"); - if (!dryRun) { - // save the live data - saveItem(response); - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } + // Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list + if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { + // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? + if ((isItemFile(onedriveJSONItem)) && (appConfig.getValueBool("sync_root_files")) && (rootName(newItemPath) == "") ) { + // This is a file + // We are configured to sync all files in the root + // This is a file in the logical root + clientSideRuleExcludesPath = false; } else { - // parent is in database - log.vlog("The parent for this path is in the local database - adding requested path (", path ,") to database"); - // are we in a --dry-run scenario? 
- if (!dryRun) { - // get the live data - JSONValue pathDetails; - try { - pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } catch (OneDriveException e) { - log.vdebug("pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - log.error("ERROR: The requested single directory to sync was not found on OneDrive"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling onedrive.getPathDetailsByDriveId(parent.driveId, path);"); - pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } - - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(pathDetails); - - // OneDrive Personal Shared Folder edgecase handling - // In a: - // --resync --upload-only --single-directory 'dir' scenario, and where the root 'dir' for --single-directory is a 'shared folder' - // OR - // --resync --upload-only scenario, and where the root 'dir' to upload is a 'shared folder' - // - // We will not have the 'tie' DB entry created because of --upload-only because we do not download the folder structure from OneDrive - // to know what the remoteDriveId actually is - if (accountType == "personal"){ - // are we in a --resync --upload-only scenario ? 
- if ((cfg.getValueBool("resync")) && (cfg.getValueBool("upload_only"))) { - // Create a temp item - // Takes a JSON input and formats to an item which can be used by the database - Item tempItem = makeItem(pathDetails); - // New DB Tie item due to edge case - Item tieDBItem; - // Set the name - tieDBItem.name = tempItem.name; - // Set the correct item type - tieDBItem.type = ItemType.dir; - //parent.type = ItemType.remote; - if ((tempItem.type == ItemType.remote) && (!tempItem.remoteDriveId.empty)) { - // set the right elements - tieDBItem.driveId = tempItem.remoteDriveId; - tieDBItem.id = tempItem.remoteId; - // Set the correct mtime - tieDBItem.mtime = tempItem.mtime; - // Add tie DB record to the local database - log.vdebug("Adding tie DB record to database: ", tieDBItem); - itemdb.upsert(tieDBItem); - } - } - } - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } + // path is unwanted + clientSideRuleExcludesPath = true; + log.vlog("Skipping item - excluded by sync_list config: ", newItemPath); } - } else { - // They are the "same" name wise but different in case sensitivity - log.error("ERROR: Current directory has a 'case-insensitive match' to an existing directory on OneDrive"); - log.error("ERROR: To resolve, rename this local directory: ", buildNormalizedPath(absolutePath(path))); - log.error("ERROR: Remote OneDrive directory: ", response["name"].str); - log.log("Skipping: ", buildNormalizedPath(absolutePath(path))); - return; } - } else { - // response is not valid JSON, an error was returned from OneDrive - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - log.log("Skipping: ", buildNormalizedPath(absolutePath(path))); - return; } } + + // return if path is excluded + return clientSideRuleExcludesPath; } - // upload a new file to OneDrive - private void uploadNewFile(const(string) 
path) - { - // Reset upload failure - OneDrive or filesystem issue (reading data) - uploadFailed = false; - Item parent; - bool parentPathFoundInDB = false; - // Check the database for the parent path - // What parent path to use? - string parentPath = dirName(path); // will be either . or something else - if (parentPath == "."){ - // Assume this is a new file in the users configured sync_dir root - // Use client defaults - parent.id = defaultRootId; // Should give something like 12345ABCDE1234A1!101 - parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 - parentPathFoundInDB = true; - } else { - // Query the database using each of the driveId's we are using - foreach (driveId; driveIDsArray) { - // Query the database for this parent path using each driveId - Item dbResponse; - if(itemdb.selectByPath(parentPath, driveId, dbResponse)){ - // parent path was found in the database - parent = dbResponse; - parentPathFoundInDB = true; - } - } + // Process the list of local changes to upload to OneDrive + void processChangedLocalItemsToUpload() { + + // Each element in this array 'databaseItemsWhereContentHasChanged' is an Database Item ID that has been modified locally + ulong batchSize = appConfig.concurrentThreads; + ulong batchCount = (databaseItemsWhereContentHasChanged.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; + + // For each batch of files to upload, upload the changed data to OneDrive + foreach (chunk; databaseItemsWhereContentHasChanged.chunks(batchSize)) { + uploadChangedLocalFileToOneDrive(chunk); } + } + + // Upload changed local files to OneDrive in parallel + void uploadChangedLocalFileToOneDrive(string[3][] array) { + + foreach (i, localItemDetails; taskPool.parallel(array)) { + + log.vdebug("Thread ", i, " Starting: ", Clock.currTime()); + + // These are the details of the item we need to upload + string changedItemParentId = localItemDetails[0]; + string changedItemId = localItemDetails[1]; + string 
localFilePath = localItemDetails[2]; + + // How much space is remaining on OneDrive + ulong remainingFreeSpace; + // Did the upload fail? + bool uploadFailed = false; + // Did we skip due to exceeding maximum allowed size? + bool skippedMaxSize = false; + // Did we skip to an exception error? + bool skippedExceptionError = false; + + // Unfortunatly, we cant store an array of Item's ... so we have to re-query the DB again - unavoidable extra processing here + // This is because the Item[] has no other functions to allow is to parallel process those elements, so we have to use a string array as input to this function + Item dbItem; + itemDB.selectById(changedItemParentId, changedItemId, dbItem); + + // Query the available space online + // This will update appConfig.quotaAvailable & appConfig.quotaRestricted values + remainingFreeSpace = getRemainingFreeSpace(dbItem.driveId); + + // Get the file size + ulong thisFileSizeLocal = getSize(localFilePath); + ulong thisFileSizeFromDB = to!ulong(dbItem.size); + + // remainingFreeSpace online includes the current file online + // we need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value + ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal; + + // Based on what we know, for this thread - can we safely upload this modified local file? + log.vdebug("This Thread Current Free Space Online: ", remainingFreeSpace); + log.vdebug("This Thread Calculated Free Space Online Post Upload: ", calculatedSpaceOnlinePostUpload); - // Get the file size - long thisFileSize = getSize(path); - // Can we upload this file - is there enough free space? - https://github.com/skilion/onedrive/issues/73 - // We can only use 'remainingFreeSpace' if we are uploading to our driveId ... 
if this is a shared folder, we have no visibility of space available, as quota details are not provided by the OneDrive API - if (parent.driveId == defaultDriveId) { - // the file will be uploaded to my driveId - log.vdebug("File upload destination is users default driveId .."); - // are quota details being restricted? - if (!quotaRestricted) { - // quota is not being restricted - we can track drive space allocation to determine if it is possible to upload the file - if ((remainingFreeSpace - thisFileSize) < 0) { - // no space to upload file, based on tracking of quota values - quotaAvailable = false; + JSONValue uploadResponse; + + bool spaceAvailableOnline = false; + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true + + // What was the latest getRemainingFreeSpace() value? + if (appConfig.quotaAvailable) { + // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? + if (calculatedSpaceOnlinePostUpload > 0) { + // Based on this thread action, we beleive that there is space available online to upload - proceed + spaceAvailableOnline = true; + } + } + // Is quota being restricted? 
+ if (appConfig.quotaRestricted) { + // Space available online is being restricted - so we have no way to really know if there is space available online + spaceAvailableOnline = true; + } + + // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) + if (spaceAvailableOnline) { + // Does this file exceed the maximum file size to upload to OneDrive? + if (thisFileSizeLocal <= maxUploadFileSize) { + // Attempt to upload the modified file + // Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result + uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + + // Evaluate the returned JSON uploadResponse + // If there was an error uploading the file, uploadResponse should be empty and invalid + if (uploadResponse.type() != JSONType.object){ + uploadFailed = true; + skippedExceptionError = true; + } + } else { - // there is free space to upload file, based on tracking of quota values - quotaAvailable = true; + // Skip file - too large + uploadFailed = true; + skippedMaxSize = true; } } else { - // set quotaAvailable as true, even though we have zero way to validate that this is correct or not - quotaAvailable = true; + // Cant upload this file - no space available + uploadFailed = true; } - } else { - // the file will be uploaded to a shared folder - // we can't track if there is enough free space to upload the file - log.vdebug("File upload destination is a shared folder - the upload may fail if not enough space on OneDrive .."); - // set quotaAvailable as true, even though we have zero way to validate that this is correct or not - quotaAvailable = true; - } - - // If performing a dry-run or parentPath is found in the database & there is quota available to upload file - if ((dryRun) || (parentPathFoundInDB && quotaAvailable)) { - // Maximum file size upload - // 
https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us - // July 2020, maximum file size for all accounts is 100GB - // January 2021, maximum file size for all accounts is 250GB - auto maxUploadFileSize = 268435456000; // 250GB - - // Can we read the file - as a permissions issue or file corruption will cause a failure - // https://github.com/abraunegg/onedrive/issues/113 - if (readLocalFile(path)){ - // we are able to read the file - // To avoid a 409 Conflict error - does the file actually exist on OneDrive already? - JSONValue fileDetailsFromOneDrive; - if (thisFileSize <= maxUploadFileSize){ - // Resolves: https://github.com/skilion/onedrive/issues/121, https://github.com/skilion/onedrive/issues/294, https://github.com/skilion/onedrive/issues/329 - // Does this 'file' already exist on OneDrive? - try { - // test if the local path exists on OneDrive - // if parent.driveId is invalid, then API call will generate a 'HTTP 400 - Bad Request' - make sure we at least have a valid parent.driveId - if (!parent.driveId.empty) { - // use configured value for parent.driveId - fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } else { - // switch to using defaultDriveId - log.vdebug("parent.driveId is empty - using defaultDriveId for API call"); - fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(defaultDriveId, path); + + // Did the upload fail? + if (uploadFailed) { + // Upload failed .. why? 
+ // No space available online + if (!spaceAvailableOnline) { + log.logAndNotify("Skipping uploading modified file ", localFilePath, " due to insufficient free space available on OneDrive"); + } + // File exceeds max allowed size + if (skippedMaxSize) { + log.logAndNotify("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: ", localFilePath); + } + // Generic message + if (skippedExceptionError) { + // normal failure message if API or exception error generated + log.logAndNotify("Uploading modified file ", localFilePath, " ... failed!"); + } + } else { + // Upload was successful + log.logAndNotify("Uploading modified file ", localFilePath, " ... done."); + + // Save JSON item in database + saveItem(uploadResponse); + + if (!dryRun) { + // Check the integrity of the uploaded modified file + performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal); + + // Update the date / time of the file online to match the local item + // Get the local file last modified time + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + localModifiedTime.fracSecs = Duration.zero; + // Get the latest eTag, and use that + string etagFromUploadResponse = uploadResponse["eTag"].str; + // Attempt to update the online date time stamp based on our local data + uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse); + } + } + + log.vdebug("Thread ", i, " Finished: ", Clock.currTime()); + + } // end of 'foreach (i, localItemDetails; array.enumerate)' + } + + // Perform the upload of a locally modified file to OneDrive + JSONValue performModifiedFileUpload(Item dbItem, string localFilePath, ulong thisFileSizeLocal) { + + JSONValue uploadResponse; + OneDriveApi uploadFileOneDriveApiInstance; + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); + + // Is this a dry-run scenario? 
+ if (!dryRun) { + // Do we use simpleUpload or create an upload session? + bool useSimpleUpload = false; + + //if ((appConfig.accountType == "personal") && (thisFileSizeLocal <= sessionThresholdFileSize)) { + + if (thisFileSizeLocal <= sessionThresholdFileSize) { + useSimpleUpload = true; + } + + // We can only upload zero size files via simpleFileUpload regardless of account type + // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 + // Additionally, all files where file size is < 4MB should be uploaded by simpleUploadReplace - everything else should use a session to upload the modified file + + if ((thisFileSizeLocal == 0) || (useSimpleUpload)) { + // Must use Simple Upload to replace the file online + try { + uploadResponse = uploadFileOneDriveApiInstance.simpleUploadReplace(localFilePath, dbItem.driveId, dbItem.id); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); } - } catch (OneDriveException e) { - // log that we generated an exception - log.vdebug("fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path); generated a OneDriveException"); - // OneDrive returned a 'HTTP/1.1 400 Bad Request' - // If the 'path', when encoded, cannot be interpreted by the OneDrive API, the API will generate a 400 error - if (e.httpStatusCode == 400) { - log.log("Skipping uploading this new file: ", buildNormalizedPath(absolutePath(path))); - log.vlog("Skipping item - OneDrive returned a 'HTTP 400 - Bad Request' when attempting to query if file exists"); - log.error("ERROR: To resolve, rename this local file: ", buildNormalizedPath(absolutePath(path))); - uploadFailed = true; - return; + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to upload a modified file to OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); } - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - if (e.httpStatusCode == 401) { - log.vlog("Skipping item - OneDrive returned a 'HTTP 401 - Unauthorized' when attempting to query if file exists"); - uploadFailed = true; - return; + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + + } catch (FileException e) { + // filesystem error + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } else { + // Configure JSONValue variables we use for a session upload + JSONValue currentOnlineData; + JSONValue uploadSessionData; + string currentETag; + + // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique + // The best way to do this is calculate the CRC32 of the file, and use this as the suffix of the session file we save + string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." 
~ computeCRC32(localFilePath); + + // Get the absolute latest object details from online + try { + currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsByDriveId(dbItem.driveId, localFilePath); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); } - // A 404 is the expected response if the file was not present - if (e.httpStatusCode == 404) { - // The file was not found on OneDrive, need to upload it - // Check if file should be skipped based on skip_size config - if (thisFileSize >= this.newSizeLimit) { - log.vlog("Skipping item - excluded by skip_size config: ", path, " (", thisFileSize/2^^20," MB)"); - return; - } - - // start of upload file - write("Uploading new file ", path, " ... 
"); - JSONValue response; - - // Calculate upload speed - auto uploadStartTime = Clock.currTime(); - - if (!dryRun) { - // Resolve https://github.com/abraunegg/onedrive/issues/37 - if (thisFileSize == 0){ - // We can only upload zero size files via simpleFileUpload regardless of account type - // https://github.com/OneDrive/onedrive-api-docs/issues/53 - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - // error uploading file - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... 
skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File is not a zero byte file - // Are we using OneDrive Personal or OneDrive Business? - // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - // error uploading file - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). 
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File larger than threshold - use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // OneDrive Business Account - always use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // upload done without error - writeln("done."); - - // upload finished - auto uploadFinishTime = Clock.currTime(); - auto uploadDuration = uploadFinishTime - uploadStartTime; - log.vdebug("File Size: ", thisFileSize, " Bytes"); - log.vdebug("Upload Duration: ", (uploadDuration.total!"msecs"/1e3), " Seconds"); - auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024); - log.vdebug("Upload Speed: ", uploadSpeed, " Mbps (approx)"); - - // Log upload action to log file - log.fileOnly("Uploading new file ", path, " ... 
done."); - // The file was uploaded, or a 4xx / 5xx error was generated - if ("size" in response){ - // The response JSON contains size, high likelihood valid response returned - ulong uploadFileSize = response["size"].integer; - - // In some cases the file that was uploaded was not complete, but 'completed' without errors on OneDrive - // This has been seen with PNG / JPG files mainly, which then contributes to generating a 412 error when we attempt to update the metadata - // Validate here that the file uploaded, at least in size, matches in the response to what the size is on disk - if (thisFileSize != uploadFileSize){ - // Upload size did not match local size - // There are 2 scenarios where this happens: - // 1. Failed Transfer - // 2. Upload file is going to a SharePoint Site, where Microsoft enriches the file with additional metadata with no way to disable - // For this client: - // - If a SharePoint Library, disableUploadValidation gets flagged as True - // - If we are syncing a business shared folder, this folder could reside on a Users Path (there should be no upload issue) or SharePoint (upload issue) - if ((disableUploadValidation)|| (syncBusinessFolders && (parent.driveId != defaultDriveId))){ - // Print a warning message - should only be triggered if: - // - disableUploadValidation gets flagged (documentLibrary account type) - // - syncBusinessFolders is being used & parent.driveId != defaultDriveId - log.log("WARNING: Uploaded file size does not match local file - skipping upload validation"); - log.vlog("WARNING: Due to Microsoft Sharepoint 'enrichment' of files, this file is now technically different to your local copy"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - } else { - // OK .. 
the uploaded file does not match and we did not disable this validation - log.log("Uploaded file size does not match local file - upload failure - retrying"); - // Delete uploaded bad file - onedrive.deleteById(response["parentReference"]["driveId"].str, response["id"].str, response["eTag"].str); - // Re-upload - uploadNewFile(path); - return; - } - } - - // File validation is OK - if ((accountType == "personal") || (thisFileSize == 0)){ - // Update the item's metadata on OneDrive - string id = response["id"].str; - string cTag; - - // Is there a valid cTag in the response? - if ("cTag" in response) { - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded - cTag = response["cTag"].str; - } else { - // Is there an eTag in the response? - if ("eTag" in response) { - // use the eTag from the response as there was no cTag - cTag = response["eTag"].str; - } else { - // no tag available - set to nothing - cTag = ""; - } - } - // check if the path exists locally before we try to set the file times - if (exists(path)) { - SysTime mtime = timeLastModified(path).toUTC(); - // update the file modified time on OneDrive and save item details to database - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); - } else { - // will be removed in different event! - log.log("File disappeared after upload: ", path); - } - } else { - // OneDrive Business Account - always use a session to upload - // The session includes a Request Body element containing lastModifiedDateTime - // which negates the need for a modify event against OneDrive - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - } - - // update free space tracking if this is our drive id - if (parent.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? 
- remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } - // File uploaded successfully, space details updated if required - return; - } else { - // response is not valid JSON, an error was returned from OneDrive - log.fileOnly("Uploading new file ", path, " ... error"); - uploadFailed = true; - return; - } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading new file ", path, " ... done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; - } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to obtain latest file details from OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); } - // OneDrive returned a '429 - Too Many Requests' - if (e.httpStatusCode == 429) { + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + + } + + // Was a valid JSON response provided? + if (currentOnlineData.type() == JSONType.object) { + // Does the response contain an eTag? + if (hasETag(currentOnlineData)) { + // Use the value returned from online + currentETag = currentOnlineData["eTag"].str; + } else { + // Use the database value + currentETag = dbItem.eTag; + } + } else { + // no valid JSON response + currentETag = dbItem.eTag; + } + + // Create the Upload Session + try { + uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, localFilePath, dbItem.driveId, dbItem.parentId, baseName(localFilePath), currentETag, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); } - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - if (e.httpStatusCode >= 500) { - uploadFailed = true; - return; + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to create an upload session on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } - // Check that the filename that is returned is actually the file we wish to upload - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file - // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, - // even though some file systems (such as a POSIX-compliant file system) may consider them as different. - // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. 
+ } catch (FileException e) { + writeln("DEBUG TO REMOVE: Modified file upload FileException Handling (Create the Upload Session)"); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + + // Perform the Upload using the session + try { + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, uploadSessionData, threadUploadSessionFilePath); + } catch (OneDriveException exception) { - // fileDetailsFromOneDrive has to be a valid object - if (fileDetailsFromOneDrive.type() == JSONType.object){ - // fileDetailsFromOneDrive = onedrive.getPathDetails(path) returned a valid JSON, meaning the file exists on OneDrive - // Check that 'name' is in the JSON response (validates data) and that 'name' == the path we are looking for - if (("name" in fileDetailsFromOneDrive) && (fileDetailsFromOneDrive["name"].str == baseName(path))) { - // OneDrive 'name' matches local path name - log.vlog("Requested file to upload exists on OneDrive - local database is out of sync for this file: ", path); - - // Is the local file newer than the uploaded file? - SysTime localFileModifiedTime = timeLastModified(path).toUTC(); - SysTime remoteFileModifiedTime = SysTime.fromISOExtString(fileDetailsFromOneDrive["fileSystemInfo"]["lastModifiedDateTime"].str); - localFileModifiedTime.fracSecs = Duration.zero; - - if (localFileModifiedTime > remoteFileModifiedTime){ - // local file is newer - log.vlog("Requested file to upload is newer than existing file on OneDrive"); - write("Uploading modified file ", path, " ... 
"); - JSONValue response; - - if (!dryRun) { - if (accountType == "personal"){ - // OneDrive Personal account upload handling - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - log.vdebug("response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File larger than threshold - use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - log.vdebug("response = session.upload(path, parent.driveId, parent.id, baseName(path)); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // response is a valid JSON object - string id = response["id"].str; - string cTag; - - // Is there a valid cTag in the response? - if ("cTag" in response) { - // use the cTag instead of the eTag because Onedrive may update the metadata of files AFTER they have been uploaded - cTag = response["cTag"].str; - } else { - // Is there an eTag in the response? 
- if ("eTag" in response) { - // use the eTag from the response as there was no cTag - cTag = response["eTag"].str; - } else { - // no tag available - set to nothing - cTag = ""; - } - } - // validate if path exists so mtime can be calculated - if (exists(path)) { - SysTime mtime = timeLastModified(path).toUTC(); - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); - } else { - // will be removed in different event! - log.log("File disappeared after upload: ", path); - } - } else { - // Log that an invalid JSON object was returned - log.vdebug("onedrive.simpleUpload or session.upload call returned an invalid JSON Object"); - return; - } - } else { - // OneDrive Business account modified file upload handling - if (accountType == "business"){ - // OneDrive Business Account - if ((!syncBusinessFolders) || (parent.driveId == defaultDriveId)) { - // If we are not syncing Shared Business Folders, or this change is going to the 'users' default drive, handle normally - // For logging consistency - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path), fileDetailsFromOneDrive["eTag"].str); - } catch (OneDriveException e) { - log.vdebug("response = session.upload(path, parent.driveId, parent.id, baseName(path), fileDetailsFromOneDrive['eTag'].str); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload complete - writeln("done."); - saveItem(response); - } else { - // If we are uploading to a shared business folder, there are a couple of corner cases here: - // 1. Shared Folder is a 'users' folder - // 2. 
Shared Folder is a 'SharePoint Library' folder, meaning we get hit by this stupidity: https://github.com/OneDrive/onedrive-api-docs/issues/935 - - // Need try{} & catch (OneDriveException e) { & catch (FileException e) { handler for this query - response = handleSharePointMetadataAdditionBugReplaceFile(fileDetailsFromOneDrive, parent, path); - if (!uploadFailed){ - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } - - // OneDrive SharePoint account modified file upload handling - if (accountType == "documentLibrary"){ - // Depending on the file size, this will depend on how best to handle the modified local file - // as if too large, the following error will be generated by OneDrive: - // HTTP request returned status code 413 (Request Entity Too Large) - // We also cant use a session to upload the file, we have to use simpleUploadReplace - - // Need try{} & catch (OneDriveException e) { & catch (FileException e) { handler for this query - response = handleSharePointMetadataAdditionBugReplaceFile(fileDetailsFromOneDrive, parent, path); - if (!uploadFailed){ - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } - - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... done."); - - // update free space tracking if this is our drive id - if (parent.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... 
done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; - } - } else { - // Save the details of the file that we got from OneDrive - // --dry-run safe - log.vlog("Updating the local database with details for this file: ", path); - if (!dryRun) { - // use the live data - saveItem(fileDetailsFromOneDrive); - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } - } - } else { - // The files are the "same" name wise but different in case sensitivity - log.error("ERROR: A local file has the same name as another local file."); - log.error("ERROR: To resolve, rename this local file: ", buildNormalizedPath(absolutePath(path))); - log.log("Skipping uploading this new file: ", buildNormalizedPath(absolutePath(path))); + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to upload a file via a session to OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); } else { - // fileDetailsFromOneDrive is not valid JSON, an error was returned from OneDrive - log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - uploadFailed = true; - return; + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } - } else { - // Skip file - too large - log.log("Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: ", path); - uploadFailed = 
true; - return; + + } catch (FileException e) { + writeln("DEBUG TO REMOVE: Modified file upload FileException Handling (Perform the Upload using the session)"); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } - } else { - // unable to read local file - log.log("Skipping uploading this file as it cannot be read (file permissions or file corruption): ", path); } + } else { - // Upload of the new file did not occur .. why? - if (!parentPathFoundInDB) { - // Parent path was not found - log.log("Skipping uploading this new file as parent path is not in the database: ", path); - uploadFailed = true; - return; - } - if (!quotaAvailable) { - // Not enough free space - log.log("Skipping item '", path, "' due to insufficient free space available on OneDrive"); - uploadFailed = true; - return; - } + // We are in a --dry-run scenario + uploadResponse = createFakeResponse(localFilePath); } + + // Debug Log the modified upload response + log.vdebug("Modified File Upload Response: ", uploadResponse); + + // Shutdown the API instance + uploadFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadFileOneDriveApiInstance); + // Return JSON + return uploadResponse; } - - private JSONValue handleSharePointMetadataAdditionBugReplaceFile(JSONValue fileDetailsFromOneDrive, const ref Item parent, const(string) path) - { - // Explicit function for handling https://github.com/OneDrive/onedrive-api-docs/issues/935 - // Replace existing file - JSONValue response; - // Depending on the file size, this will depend on how best to handle the modified local file - // as if too large, the following error will be generated by OneDrive: - // HTTP request returned status code 413 (Request Entity Too Large) - // We also cant use a session to upload the file, we have to use simpleUploadReplace + // Query the OneDrive API using the provided driveId to get the latest quota details + ulong getRemainingFreeSpace(string driveId) { + + // Get the quota details 
for this driveId, as this could have changed since we started the application - the user could have added / deleted data online, or purchased additional storage + // Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders - // Calculate existing hash for this file - string existingFileHash = computeQuickXorHash(path); + JSONValue currentDriveQuota; + ulong remainingQuota; - if (getSize(path) <= thresholdFileSize) { - // Upload file via simpleUploadReplace as below threshold size - try { - response = onedrive.simpleUploadReplace(path, fileDetailsFromOneDrive["parentReference"]["driveId"].str, fileDetailsFromOneDrive["id"].str, fileDetailsFromOneDrive["eTag"].str); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; + try { + // Create a new OneDrive API instance + OneDriveApi getCurrentDriveQuotaApiInstance; + getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); + getCurrentDriveQuotaApiInstance.initialise(); + log.vdebug("Seeking available quota for this drive id: ", driveId); + currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); + // Shut this API instance down + getCurrentDriveQuotaApiInstance.shutdown(); + // Free object and memory + object.destroy(getCurrentDriveQuotaApiInstance); + } catch (OneDriveException e) { + log.vdebug("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException"); + } + + // validate that currentDriveQuota is a JSON value + if (currentDriveQuota.type() == JSONType.object) { + // Response from API contains valid data + // If 'personal' accounts, if driveId == defaultDriveId, then we will 
have data + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value + + if ("quota" in currentDriveQuota){ + if (driveId == appConfig.defaultDriveId) { + // We potentially have updated quota remaining details available + // However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero + if ("remaining" in currentDriveQuota["quota"]){ + // We have valid quota remaining details returned for the provided drive id + remainingQuota = currentDriveQuota["quota"]["remaining"].integer; + + if (remainingQuota <= 0) { + if (appConfig.accountType == "personal"){ + // zero space available + log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); + remainingQuota = 0; + appConfig.quotaAvailable = false; + } else { + // zero space available is being reported, maybe being restricted? + log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); + remainingQuota = 0; + appConfig.quotaRestricted = true; + } + } + } } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + // quota details returned, but for a drive id that is not ours + if ("remaining" in currentDriveQuota["quota"]){ + // remaining is in the quota JSON response + if (currentDriveQuota["quota"]["remaining"].integer <= 0) { + // value returned is 0 or less than 0 + log.vlog("OneDrive quota information is set at zero, as this is not our drive id, ignoring"); + remainingQuota = 0; + appConfig.quotaRestricted = true; + } + } } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; - } - } else { - // Have to upload via a session, however we have to delete the file first otherwise this will generate a 404 error post session upload - // Remove the existing file - onedrive.deleteById(fileDetailsFromOneDrive["parentReference"]["driveId"].str, fileDetailsFromOneDrive["id"].str, fileDetailsFromOneDrive["eTag"].str); - // Upload as a session, as a new file - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; + } else { + // No quota details returned + if (driveId == appConfig.defaultDriveId) { + // no quota details returned for current drive id + log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. 
Please free up some space online or purchase additional space."); + remainingQuota = 0; + appConfig.quotaRestricted = true; } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + // quota details not available + log.vdebug("WARNING: OneDrive quota information is being restricted as this is not our drive id."); + remainingQuota = 0; + appConfig.quotaRestricted = true; } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; } } - writeln("done."); - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // So - now the 'local' and 'remote' file is technically DIFFERENT ... thanks Microsoft .. 
NO way to disable this stupidity - string uploadNewFileHash; - if (hasQuickXorHash(response)) { - // use the response json hash detail to compare - uploadNewFileHash = response["file"]["hashes"]["quickXorHash"].str; - } - if (existingFileHash != uploadNewFileHash) { - // file was modified by Microsoft post upload to SharePoint site - log.vdebug("Existing Local File Hash: ", existingFileHash); - log.vdebug("New Remote File Hash: ", uploadNewFileHash); - - if(!uploadOnly){ - // Download the Microsoft 'modified' file so 'local' is now in sync - log.vlog("Due to Microsoft Sharepoint 'enrichment' of files, downloading 'enriched' file to ensure local file is in-sync"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - auto fileSize = response["size"].integer; - onedrive.downloadById(response["parentReference"]["driveId"].str, response["id"].str, path, fileSize); + // what was the determined available quota? + log.vdebug("Available quota: ", remainingQuota); + return remainingQuota; + } + + // Perform a filesystem walk to uncover new data to upload to OneDrive + void scanLocalFilesystemPathForNewData(string path) { + + // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? 
+ string logPath; + if (path == ".") { + // get the configured sync_dir + logPath = buildNormalizedPath(appConfig.getValueString("sync_dir")); + } else { + // use what was passed in + if (!appConfig.getValueBool("monitor")) { + logPath = buildNormalizedPath(appConfig.getValueString("sync_dir")) ~ "/" ~ path; } else { - // we are not downloading a file, warn that file differences will exist - log.vlog("WARNING: Due to Microsoft Sharepoint 'enrichment' of files, this file is now technically different to your local copy"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); + logPath = path; } } - // return a JSON response so that it can be used and saved - return response; - } - - // delete an item on OneDrive - private void uploadDeleteItem(Item item, const(string) path) - { - log.log("Deleting item from OneDrive: ", path); - bool flagAsBigDelete = false; - - // query the database - how many objects will this remove? - auto children = getChildren(item.driveId, item.id); - long itemsToDelete = count(children); - log.vdebug("Number of items to delete: ", itemsToDelete); - - // Are we running in monitor mode? A local delete of a file will issue a inotify event, which will trigger the local & remote data immediately - if (!cfg.getValueBool("monitor")) { - // not running in monitor mode - if (itemsToDelete > cfg.getValueLong("classify_as_big_delete")) { - // A big delete detected - flagAsBigDelete = true; - if (!cfg.getValueBool("force")) { - log.error("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. 
Exiting client to preserve data on OneDrive"); - log.error("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value"); - // Must exit here to preserve data on OneDrive - onedrive.shutdown(); - exit(-1); + // Log the action that we are performing, however only if this is a directory + if (isDir(path)) { + if (!appConfig.surpressLoggingOutput) { + if (!cleanupLocalFiles) { + log.log("Scanning the local file system '", logPath, "' for new data to upload ..."); + } else { + log.log("Scanning the local file system '", logPath, "' for data to cleanup ..."); } } } - if (!dryRun) { - // we are not in a --dry-run situation, process deletion to OneDrive - if ((item.driveId == "") && (item.id == "") && (item.eTag == "")){ - // These are empty ... we cannot delete if this is empty .... - log.vdebug("item.driveId, item.id & item.eTag are empty ... need to query OneDrive for values"); - log.vdebug("Checking OneDrive for path: ", path); - JSONValue onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path - log.vdebug("OneDrive path details: ", onedrivePathDetails); - item.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 - item.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101 - item.eTag = onedrivePathDetails["eTag"].str; // Should be something like aNjM2NjJFRUVGQjY2NjJFMSE5MzUuMA - } - - // do the delete - try { - // what item are we trying to delete? 
- log.vdebug("Attempting to delete item from drive: ", item.driveId); - log.vdebug("Attempting to delete this item id: ", item.id); - // perform the delete via the API - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - // item.id, item.eTag could not be found on driveId - log.vlog("OneDrive reported: The resource could not be found."); - } else { - // Not a 404 response .. is this a 401 response due to some sort of OneDrive Business security policy? - if ((e.httpStatusCode == 401) && (accountType != "personal")) { - log.vdebug("onedrive.deleteById generated a 401 error response when attempting to delete object by item id"); - auto errorArray = splitLines(e.msg); - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - if (errorMessage["error"]["message"].str == "Access denied. You do not have permission to perform this action or access this resource.") { - // Issue #1041 - Unable to delete OneDrive content when permissions prevent deletion - try { - log.vdebug("Attempting a reverse delete of all child objects from OneDrive"); - foreach_reverse (Item child; children) { - log.vdebug("Delete child item from drive: ", child.driveId); - log.vdebug("Delete this child item id: ", child.id); - onedrive.deleteById(child.driveId, child.id, child.eTag); - // delete the child reference in the local database - itemdb.deleteById(child.driveId, child.id); - } - log.vdebug("Delete parent item from drive: ", item.driveId); - log.vdebug("Delete this parent item id: ", item.id); - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("A further error was generated when attempting a reverse delete of objects from OneDrive"); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - } - - // Not a 404 response .. is this a 403 response due to OneDrive Business Retention Policy being enabled? 
- if ((e.httpStatusCode == 403) && (accountType != "personal")) { - log.vdebug("onedrive.deleteById generated a 403 error response when attempting to delete object by item id"); - auto errorArray = splitLines(e.msg); - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - if (errorMessage["error"]["message"].str == "Request was cancelled by event received. If attempting to delete a non-empty folder, it's possible that it's on hold") { - // Issue #338 - Unable to delete OneDrive content when OneDrive Business Retention Policy is enabled - try { - log.vdebug("Attempting a reverse delete of all child objects from OneDrive"); - foreach_reverse (Item child; children) { - log.vdebug("Delete child item from drive: ", child.driveId); - log.vdebug("Delete this child item id: ", child.id); - onedrive.deleteById(child.driveId, child.id, child.eTag); - // delete the child reference in the local database - itemdb.deleteById(child.driveId, child.id); - } - log.vdebug("Delete parent item from drive: ", item.driveId); - log.vdebug("Delete this parent item id: ", item.id); - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("A further error was generated when attempting a reverse delete of objects from OneDrive"); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - } else { - // Not a 403 response & OneDrive Business Account / O365 Shared Folder / Library - log.vdebug("onedrive.deleteById generated an error response when attempting to delete object by item id"); - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + auto startTime = Clock.currTime(); + log.vdebug("Starting Filesystem Walk: ", startTime); + + // Perform the filesystem walk of this path, building an array of new items to upload + scanPathForNewData(path); + + // To finish off the processing items, this is needed to reflect this in the log 
+ log.vdebug("------------------------------------------------------------------"); + + auto finishTime = Clock.currTime(); + log.vdebug("Finished Filesystem Walk: ", finishTime); + + auto elapsedTime = finishTime - startTime; + log.vdebug("Elapsed Time Filesystem Walk: ", elapsedTime); + + // Upload new data that has been identified + // Are there any items to download post fetching the /delta data? + if (!newLocalFilesToUploadToOneDrive.empty) { + // There are elements to upload + log.vlog("New items to upload to OneDrive: ", newLocalFilesToUploadToOneDrive.length); + + // How much data do we need to upload? This is important, as, we need to know how much data to determine if all the files can be uploaded + foreach (uploadFilePath; newLocalFilesToUploadToOneDrive) { + // validate that the path actually exists so that it can be counted + if (exists(uploadFilePath)) { + totalDataToUpload = totalDataToUpload + getSize(uploadFilePath); } } - // delete the reference in the local database - itemdb.deleteById(item.driveId, item.id); - if (item.remoteId != null) { - // If the item is a remote item, delete the reference in the local database - itemdb.deleteById(item.remoteDriveId, item.remoteId); - } - } - } - - // get the children of an item id from the database - private Item[] getChildren(string driveId, string id) - { - Item[] children; - children ~= itemdb.selectChildren(driveId, id); - foreach (Item child; children) { - if (child.type != ItemType.file) { - // recursively get the children of this child - children ~= getChildren(child.driveId, child.id); - } - } - return children; - } - - // update the item's last modified time - private void uploadLastModifiedTime(const(char)[] driveId, const(char)[] id, const(char)[] eTag, SysTime mtime) - { - string itemModifiedTime; - itemModifiedTime = mtime.toISOExtString(); - JSONValue data = [ - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": itemModifiedTime - ]) - ]; - - JSONValue response; - try { - response = 
onedrive.updateById(driveId, id, data, eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // OneDrive threw a 412 error, most likely: ETag does not match current item's value - // Retry without eTag - log.vdebug("File Metadata Update Failed - OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting file time stamp update - gracefully handling error"); - string nullTag = null; - response = onedrive.updateById(driveId, id, data, nullTag); - } - } - // save the updated response from OneDrive in the database - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - - // save item details into database - private void saveItem(JSONValue jsonItem) - { - // jsonItem has to be a valid object - if (jsonItem.type() == JSONType.object){ - // Check if the response JSON has an 'id', otherwise makeItem() fails with 'Key not found: id' - if (hasId(jsonItem)) { - // Are we in a --upload-only & --remove-source-files scenario? 
- // We do not want to add the item to the database in this situation as there is no local reference to the file post file deletion - // If the item is a directory, we need to add this to the DB, if this is a file, we dont add this, the parent path is not in DB, thus any new files in this directory are not added - if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) { - // Log that we skipping adding item to the local DB and the reason why - log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured"); + // How many bytes to upload + if (totalDataToUpload < 1024) { + // Display as Bytes to upload + log.vlog("Total New Data to Upload: ", totalDataToUpload, " Bytes"); + } else { + if ((totalDataToUpload > 1024) && (totalDataToUpload < 1048576)) { + // Display as KB to upload + log.vlog("Total New Data to Upload: ", (totalDataToUpload / 1024), " KB"); } else { - // What is the JSON item we are trying to create a DB record with? - log.vdebug("Creating DB item from this JSON: ", jsonItem); - // Takes a JSON input and formats to an item which can be used by the database - Item item = makeItem(jsonItem); - // Add to the local database - log.vdebug("Adding to database: ", item); - itemdb.upsert(item); - - // If we have a remote drive ID, add this to our list of known drive id's - if (!item.remoteDriveId.empty) { - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, item.remoteDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= item.remoteDriveId; - } - } + // Display as MB to upload + log.vlog("Total New Data to Upload: ", (totalDataToUpload / 1024 / 1024), " MB"); } - } else { - // log error - log.error("ERROR: OneDrive response missing required 'id' element"); - log.error("ERROR: ", jsonItem); } - } else { - // log error - log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); - log.error("ERROR: Increase 
logging verbosity to assist determining why."); + + // How much space is available (Account Drive ID) + // The file, could be uploaded to a shared folder, which, we are not tracking how much free space is available there ... + log.vdebug("Current Available Space Online (Account Drive ID): ", (appConfig.remainingFreeSpace / 1024 / 1024), " MB"); + + // Perform the upload + uploadNewLocalFileItems(); + + // Cleanup array memory + newLocalFilesToUploadToOneDrive = []; } } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_move - // This function is only called in monitor mode when an move event is coming from - // inotify and we try to move the item. - void uploadMoveItem(string from, string to) - { - log.log("Moving ", from, " to ", to); - - // 'to' file validation .. is the 'to' file valid for upload? - if (isSymlink(to)) { - // if config says so we skip all symlinked items - if (cfg.getValueBool("skip_symlinks")) { - log.vlog("Skipping item - skip symbolic links configured: ", to); - return; - - } - // skip unexisting symbolic links - else if (!exists(readLink(to))) { - log.logAndNotify("Skipping item - invalid symbolic link: ", to); - return; - } + + // Scan this path for new data + void scanPathForNewData(string path) { + + ulong maxPathLength; + ulong pathWalkLength; + + // Add this logging break to assist with what was checked for each path + if (path != ".") { + log.vdebug("------------------------------------------------------------------"); } - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(to)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", to); - return; + // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders + // If the path is greater than allowed characters, then one drive will return a '400 - Bad Request' + // Need to ensure that the URI is encoded before the check 
is made: + // - 400 Character Limit for OneDrive Business / Office 365 + // - 430 Character Limit for OneDrive Personal + + // Configure maxPathLength based on account type + if (appConfig.accountType == "personal") { + // Personal Account + maxPathLength = 430; + } else { + // Business Account / Office365 / SharePoint + maxPathLength = 400; } - // Check for bad whitespace items - if (!containsBadWhiteSpace(to)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", to); + // A short lived item that has already disappeared will cause an error - is the path still valid? + if (!exists(path)) { + log.log("Skipping item - path has disappeared: ", path); return; } - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(to)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", to); + // Calculate the path length by walking the path and catch any UTF-8 sequence errors at the same time + // https://github.com/skilion/onedrive/issues/57 + // https://github.com/abraunegg/onedrive/issues/487 + // https://github.com/abraunegg/onedrive/issues/1192 + try { + pathWalkLength = path.byGrapheme.walkLength; + } catch (std.utf.UTFException e) { + // Path contains characters which generate a UTF exception + log.logAndNotify("Skipping item - invalid UTF sequence: ", path); + log.vdebug(" Error Reason:", e.msg); return; } - // 'to' file has passed file validation - Item fromItem, toItem, parentItem; - if (!itemdb.selectByPath(from, defaultDriveId, fromItem)) { - if (cfg.getValueBool("skip_dotfiles") && isDotFile(to)){ - log.log("Skipping upload due to skip_dotfile = true"); - return; - } else { - uploadNewFile(to); - return; + // Is the path length is less than maxPathLength + if (pathWalkLength < maxPathLength) { + // Is this path unwanted + bool unwanted = false; + + // First check of this item - if we are in a --dry-run scenario, we may have 'fake deleted' this path + // thus, the entries are 
not in the dry-run DB copy, thus, at this point the client thinks that this is an item to upload + // Check this 'path' for an entry in pathFakeDeletedArray - if it is there, this is unwanted + if (dryRun) { + // Is this path in the array of fake deleted items? If yes, return early, nothing else to do, save processing + if (canFind(pathFakeDeletedArray, path)) return; } - } - if (fromItem.parentId == null) { - // the item is a remote folder, need to do the operation on the parent - enforce(itemdb.selectByPathWithoutRemote(from, defaultDriveId, fromItem)); - } - if (itemdb.selectByPath(to, defaultDriveId, toItem)) { - // the destination has been overwritten - uploadDeleteItem(toItem, to); - } - if (!itemdb.selectByPath(dirName(to), defaultDriveId, parentItem)) { - // the parent item is not in the database - - // is the destination a .folder that is being skipped? - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(dirName(to))) { - // target location is a .folder - log.vdebug("Target location is excluded from sync due to skip_dotfiles = true"); - // item will have been moved locally, but as this is now to a location that is not synced, needs to be removed from OneDrive - log.log("Item has been moved to a location that is excluded from sync operations. 
Removing item from OneDrive"); - uploadDeleteItem(fromItem, from); - return; + + // This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly + // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252 + if (!unwanted) { + if(!isValid(path)) { + // Path is not valid according to https://dlang.org/phobos/std_encoding.html + log.logAndNotify("Skipping item - invalid character encoding sequence: ", path); + unwanted = true; } } - // some other error - throw new SyncException("Can't move an item to an unsynced directory"); - } - if (cfg.getValueBool("skip_dotfiles") && isDotFile(to)){ - log.log("Removing item from OneDrive due to skip_dotfiles = true"); - uploadDeleteItem(fromItem, from); - return; - } - if (fromItem.driveId != parentItem.driveId) { - // items cannot be moved between drives - uploadDeleteItem(fromItem, from); - uploadNewFile(to); - } else { - if (!exists(to)) { - log.vlog("uploadMoveItem target has disappeared: ", to); - return; + // Check this path against the Client Side Filtering Rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + if (!unwanted) { + unwanted = checkPathAgainstClientSideFiltering(path); } - SysTime mtime = timeLastModified(to).toUTC(); - JSONValue diff = [ - "name": JSONValue(baseName(to)), - "parentReference": JSONValue([ - "id": parentItem.id - ]), - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": mtime.toISOExtString() - ]) - ]; - // Perform the move operation on OneDrive - JSONValue response; - try { - response = onedrive.updateById(fromItem.driveId, fromItem.id, diff, fromItem.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // OneDrive threw a 412 error, most likely: ETag does not match current item's value - // Retry without eTag - log.vdebug("File Move Failed - 
OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error"); - string nullTag = null; - // move the file but without the eTag - response = onedrive.updateById(fromItem.driveId, fromItem.id, diff, nullTag); + // Check this path against the Microsoft Naming Conventions & Restristions + // - Microsoft OneDrive restriction and limitations about Windows naming files + // - Bad whitespace items + // - HTML ASCII Codes as part of file name + if (!unwanted) { + unwanted = checkPathAgainstMicrosoftNamingRestrictions(path); + } + + if (!unwanted) { + // At this point, this path, we want to scan for new data as it is not excluded + if (isDir(path)) { + // Check if this path in the database + bool directoryFoundInDB = pathFoundInDatabase(path); + + // Was the path found in the database? + if (!directoryFoundInDB) { + // Path not found in database when searching all drive id's + if (!cleanupLocalFiles) { + // --download-only --cleanup-local-files not used + // Create this directory on OneDrive so that we can upload files to it + createDirectoryOnline(path); + } else { + // we need to clean up this directory + log.log("Removing local directory as --download-only & --cleanup-local-files configured"); + // Remove any children of this path if they still exist + // Resolve 'Directory not empty' error when deleting local files + try { + foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { + // what sort of child is this? + if (isDir(child.name)) { + log.log("Removing local directory: ", child.name); + } else { + log.log("Removing local file: ", child.name); + } + + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... process local delete + if (exists(child)) { + try { + attrIsDir(child.linkAttributes) ? 
rmdir(child.name) : remove(child.name); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + } + } + // Remove the path now that it is empty of children + log.log("Removing local directory: ", path); + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... process local delete + try { + rmdirRecurse(path); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return; + } + } + } + + // flag for if we are going traverse this path + bool skipFolderTraverse = false; + + // Before we traverse this 'path', we need to make a last check to see if this was just excluded + if (appConfig.accountType == "business") { + // search businessSharedFoldersOnlineToSkip for this path + if (canFind(businessSharedFoldersOnlineToSkip, path)) { + // This path was skipped - why? + log.logAndNotify("Skipping item '", path, "' due to this path matching an existing online Business Shared Folder name"); + skipFolderTraverse = true; + } + } + + // Do we traverse this path? + if (!skipFolderTraverse) { + // Try and access this directory and any path below + try { + auto entries = dirEntries(path, SpanMode.shallow, false); + foreach (DirEntry entry; entries) { + string thisPath = entry.name; + scanPathForNewData(thisPath); + } + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return; + } + } + } else { + // https://github.com/abraunegg/onedrive/issues/984 + // path is not a directory, is it a valid file? + // pipes - whilst technically valid files, are not valid for this client + // prw-rw-r--. 
1 user user 0 Jul 7 05:55 my_pipe + if (isFile(path)) { + // Path is a valid file, not a pipe + bool fileFoundInDB = pathFoundInDatabase(path); + // Was the file found in the database? + if (!fileFoundInDB) { + // File not found in database when searching all drive id's + // Do we upload the file or clean up the file? + if (!cleanupLocalFiles) { + // --download-only --cleanup-local-files not used + // Add this path as a file we need to upload + log.vdebug("OneDrive Client flagging to upload this file to OneDrive: ", path); + newLocalFilesToUploadToOneDrive ~= path; + } else { + // we need to clean up this file + log.log("Removing local file as --download-only & --cleanup-local-files configured"); + // are we in a --dry-run scenario? + log.log("Removing local file: ", path); + if (!dryRun) { + // No --dry-run ... process local file delete + safeRemove(path); + } + } + } + } else { + // path is not a valid file + log.logAndNotify("Skipping item - item is not a valid file: ", path); + } } - } - // save the move response from OneDrive in the database - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); + } + } else { + // This path was skipped - why? + log.logAndNotify("Skipping item '", path, "' due to the full path exceeding ", maxPathLength, " characters (Microsoft OneDrive limitation)"); } } - - // delete an item by it's path - void deleteByPath(const(string) path) - { - Item item; - // Need to check all driveid's we know about, not just the defaultDriveId - bool itemInDB = false; - foreach (searchDriveId; driveIDsArray) { - if (itemdb.selectByPath(path, searchDriveId, item)) { - // item was found in the DB - itemInDB = true; - break; + + // Handle a single file inotify trigger when using --monitor + void handleLocalFileTrigger(string localFilePath) { + // Is this path a new file or an existing one? 
+ // Normally we would use pathFoundInDatabase() to calculate, but we need 'databaseItem' as well if the item is in the database + Item databaseItem; + bool fileFoundInDB = false; + string[3][] modifiedItemToUpload; + + foreach (driveId; driveIDsArray) { + if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) { + fileFoundInDB = true; } } - if (!itemInDB) { - throw new SyncException("The item to delete is not in the local database"); - } - if (item.parentId == null) { - // the item is a remote folder, need to do the operation on the parent - enforce(itemdb.selectByPathWithoutRemote(path, defaultDriveId, item)); + // Was the file found in the database? + if (!fileFoundInDB) { + // This is a new file + scanLocalFilesystemPathForNewData(localFilePath); + } else { + // This is a modified file, needs to be handled as such + modifiedItemToUpload ~= [databaseItem.driveId, databaseItem.id, localFilePath]; + uploadChangedLocalFileToOneDrive(modifiedItemToUpload); } - try { - if (noRemoteDelete) { - // do not process remote delete - log.vlog("Skipping remote delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - log.log(e.msg); - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } + + // Query the database to determine if this path is within the existing database + bool pathFoundInDatabase(string searchPath) { + + // Check if this path in the database + Item databaseItem; + bool pathFoundInDB = false; + foreach (driveId; driveIDsArray) { + if (itemDB.selectByPath(searchPath, driveId, databaseItem)) { + pathFoundInDB = true; } } + return pathFoundInDB; } - // move a OneDrive folder from one name to another - void moveByPath(const(string) source, const(string) destination) - { - log.vlog("Moving remote folder: ", source, " -> ", destination); + // Create a new directory online on OneDrive + // - 
Test if we can get the parent path details from the database, otherwise we need to search online + // for the path flow and create the folder that way + void createDirectoryOnline(string thisNewPathToCreate) { - // Source and Destination are relative to ~/OneDrive - string sourcePath = source; - string destinationBasePath = dirName(destination).idup; + log.log("OneDrive Client requested to create this directory online: ", thisNewPathToCreate); - // if destinationBasePath == '.' then destinationBasePath needs to be "" - if (destinationBasePath == ".") { - destinationBasePath = ""; - } + Item parentItem; + JSONValue onlinePathData; - string newFolderName = baseName(destination).idup; - string destinationPathString = "/drive/root:/" ~ destinationBasePath; + // Create a new API Instance for this thread and initialise it + OneDriveApi createDirectoryOnlineOneDriveApiInstance; + createDirectoryOnlineOneDriveApiInstance = new OneDriveApi(appConfig); + createDirectoryOnlineOneDriveApiInstance.initialise(); - // Build up the JSON changes - JSONValue moveData = ["name": newFolderName]; - JSONValue destinationPath = ["path": destinationPathString]; - moveData["parentReference"] = destinationPath; - - // Make the change on OneDrive - auto res = onedrive.moveByPath(sourcePath, moveData); - } - - // Query Office 365 SharePoint Shared Library site to obtain it's Drive ID - void querySiteCollectionForDriveID(string o365SharedLibraryName) - { - // Steps to get the ID: - // 1. Query https://graph.microsoft.com/v1.0/sites?search= with the name entered - // 2. Evaluate the response. A valid response will contain the description and the id. If the response comes back with nothing, the site name cannot be found or no access - // 3. If valid, use the returned ID and query the site drives - // https://graph.microsoft.com/v1.0/sites//drives - // 4. Display Shared Library Name & Drive ID + // What parent path to use? + string parentPath = dirName(thisNewPathToCreate); // will be either . 
or something else - string site_id; - string drive_id; - bool found = false; - JSONValue siteQuery; - string nextLink; - string[] siteSearchResults; - - // The account type must not be a personal account type - if (accountType == "personal"){ - log.error("ERROR: A OneDrive Personal Account cannot be used with --get-O365-drive-id. Please re-authenticate your client using a OneDrive Business Account."); - return; - } - - // What query are we performing? - log.log("Office 365 Library Name Query: ", o365SharedLibraryName); - - for (;;) { - try { - siteQuery = onedrive.o365SiteSearch(nextLink); - } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for Office 365 Library Name failed"); - // Forbidden - most likely authentication scope needs to be updated - if (e.httpStatusCode == 403) { - log.error("ERROR: Authentication scope needs to be updated. Use --reauth and re-authenticate client."); - return; + // Configure the parentItem by if this is the account 'root' use the root details, or search the database for the parent details + if (parentPath == ".") { + // Parent path is '.' 
which is the account root + // Use client defaults + parentItem.driveId = appConfig.defaultDriveId; // Should give something like 12345abcde1234a1 + parentItem.id = appConfig.defaultRootId; // Should give something like 12345ABCDE1234A1!101 + } else { + // Query the parent path online + log.vdebug("Attempting to query Local Database for this parent path: ", parentPath); + + // Attempt a 2 step process to work out where to create the directory + // Step 1: Query the DB first + // Step 2: Query online as last resort + + // Step 1: Check if this path in the database + Item databaseItem; + bool pathFoundInDB = false; + foreach (driveId; driveIDsArray) { + if (itemDB.selectByPath(parentPath, driveId, databaseItem)) { + pathFoundInDB = true; + log.vdebug("databaseItem: ", databaseItem); + log.vdebug("pathFoundInDB: ", pathFoundInDB); } - // Requested resource cannot be found - if (e.httpStatusCode == 404) { - string siteSearchUrl; - if (nextLink.empty) { - siteSearchUrl = onedrive.getSiteSearchUrl(); + } + + // Step 2: Query for the path online + if (!pathFoundInDB) { + // path not found in database + try { + log.vdebug("Attempting to query OneDrive Online for this parent path as path not found in local database: ", parentPath); + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); + // Save item to the database + saveItem(onlinePathData); + parentItem = makeItem(onlinePathData); + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + // Parent does not exist ... 
need to create parent + log.vdebug("Parent path does not exist online: ", parentPath); + createDirectoryOnline(parentPath); + // no return here as we need to continue, but need to re-query the OneDrive API to get the right parental details now that they exist + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); + parentItem = makeItem(onlinePathData); } else { - siteSearchUrl = nextLink; - } - // log the error - log.error("ERROR: Your OneDrive Account and Authentication Scope cannot access this OneDrive API: ", siteSearchUrl); - log.error("ERROR: To resolve, please discuss this issue with whomever supports your OneDrive and SharePoint environment."); - return; - } - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); - } - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query Sharepoint Sites - retrying applicable request"); - log.vdebug("siteQuery = onedrive.o365SiteSearch(nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: siteQuery = onedrive.o365SiteSearch(nextLink)"); - siteQuery = onedrive.o365SiteSearch(nextLink); - log.vdebug("Query 'siteQuery = onedrive.o365SiteSearch(nextLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: siteQuery = onedrive.o365SiteSearch(nextLink) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(createDirectoryOnlineOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to create a remote directory on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + createDirectoryOnline(thisNewPathToCreate); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } } - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; } + } else { + // parent path found in database ... use those details ... 
+ parentItem = databaseItem; } + } + + // Make sure the full path does not exist online, this should generate a 404 response, to which then the folder will be created online + try { + // Try and query the OneDrive API for the path we need to create + log.vdebug("Attempting to query OneDrive for this path: ", thisNewPathToCreate); - // is siteQuery a valid JSON object & contain data we can use? - if ((siteQuery.type() == JSONType.object) && ("value" in siteQuery)) { - // valid JSON object - log.vdebug("O365 Query Response: ", siteQuery); + if (parentItem.driveId == appConfig.defaultDriveId) { + // Use getPathDetailsByDriveId + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, thisNewPathToCreate); + } else { + // If the parentItem.driveId is not our driveId - the path we are looking for will not be at the logical location that getPathDetailsByDriveId + // can use - as it will always return a 404 .. even if the path actually exists (which is the whole point of this test) + // Search the parentItem.driveId for any folder name match that we are going to create, then compare response JSON items with parentItem.id + // If no match, the folder we want to create does not exist at the location we are seeking to create it at, thus generate a 404 + onlinePathData = createDirectoryOnlineOneDriveApiInstance.searchDriveForPath(parentItem.driveId, baseName(thisNewPathToCreate)); - foreach (searchResult; siteQuery["value"].array) { - // Need an 'exclusive' match here with o365SharedLibraryName as entered - log.vdebug("Found O365 Site: ", searchResult); - - // 'displayName' and 'id' have to be present in the search result record in order to query the site - if (("displayName" in searchResult) && ("id" in searchResult)) { - if (o365SharedLibraryName == searchResult["displayName"].str){ - // 'displayName' matches search request - site_id = searchResult["id"].str; - JSONValue siteDriveQuery; - - try { - siteDriveQuery = 
onedrive.o365SiteDrives(site_id); - } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for Office Site ID failed"); - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + // Process the response from searching the drive + ulong responseCount = count(onlinePathData["value"].array); + if (responseCount > 0) { + // Search 'name' matches were found .. need to match these against parentItem.id + bool foundDirectoryOnline = false; + JSONValue foundDirectoryJSONItem; + // Items were returned .. but is one of these what we are looking for? + foreach (childJSON; onlinePathData["value"].array) { + // Is this item not a file? + if (!isFileItem(childJSON)) { + Item thisChildItem = makeItem(childJSON); + // Direct Match Check + if ((parentItem.id == thisChildItem.parentId) && (baseName(thisNewPathToCreate) == thisChildItem.name)) { + // High confidence that this child folder is a direct match we are trying to create and it already exists online + log.vdebug("Path we are searching for exists online: ", baseName(thisNewPathToCreate)); + log.vdebug("childJSON: ", childJSON); + foundDirectoryOnline = true; + foundDirectoryJSONItem = childJSON; + break; } + // Full Lower Case POSIX Match Check + string childAsLower = toLower(childJSON["name"].str); + string thisFolderNameAsLower = toLower(baseName(thisNewPathToCreate)); - // is siteDriveQuery a valid JSON object & contain data we can use? 
- if ((siteDriveQuery.type() == JSONType.object) && ("value" in siteDriveQuery)) { - // valid JSON object - foreach (driveResult; siteDriveQuery["value"].array) { - // Display results - writeln("-----------------------------------------------"); - log.vdebug("Site Details: ", driveResult); - found = true; - writeln("Site Name: ", searchResult["displayName"].str); - writeln("Library Name: ", driveResult["name"].str); - writeln("drive_id: ", driveResult["id"].str); - writeln("Library URL: ", driveResult["webUrl"].str); - } - // closeout - writeln("-----------------------------------------------"); - } else { - // not a valid JSON object - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - return; + if (childAsLower == thisFolderNameAsLower) { + // This is a POSIX 'case in-sensitive match' ..... + // Local item name has a 'case-insensitive match' to an existing item on OneDrive + foundDirectoryOnline = true; + foundDirectoryJSONItem = childJSON; + break; } } + } + + if (foundDirectoryOnline) { + // Directory we are seeking was found online ... + onlinePathData = foundDirectoryJSONItem; } else { - // 'displayName', 'id' or ''webUrl' not present in JSON results for a specific site - string siteNameAvailable = "Site 'name' was restricted by OneDrive API permissions"; - bool displayNameAvailable = false; - bool idAvailable = false; - if ("name" in searchResult) siteNameAvailable = searchResult["name"].str; - if ("displayName" in searchResult) displayNameAvailable = true; - if ("id" in searchResult) idAvailable = true; - - // Display error details for this site data - writeln(); - log.error("ERROR: SharePoint Site details not provided for: ", siteNameAvailable); - log.error("ERROR: The SharePoint Site results returned from OneDrive API do not contain the required items to match. 
Please check your permissions with your site administrator."); - log.error("ERROR: Your site security settings is preventing the following details from being accessed: 'displayName' or 'id'"); - log.vlog(" - Is 'displayName' available = ", displayNameAvailable); - log.vlog(" - Is 'id' available = ", idAvailable); - log.error("ERROR: To debug this further, please increase verbosity (--verbose or --verbose --verbose) to provide further insight as to what details are actually being returned."); + // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder + throw new OneDriveException(404, "Name not found via search"); } + } else { + // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder + throw new OneDriveException(404, "Name not found via search"); } + } + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + // This is a good error - it means that the directory to create 100% does not exist online + // The directory was not found on the drive id we queried + log.vlog("The requested directory to create was not found on OneDrive - creating remote directory: ", thisNewPathToCreate); - if(!found) { - // The SharePoint site we are searching for was not found in this bundle set - // Add to siteSearchResults so we can display what we did find - string siteSearchResultsEntry; - foreach (searchResult; siteQuery["value"].array) { - // We can only add the displayName if it is available - if ("displayName" in searchResult) { - // Use the displayName - siteSearchResultsEntry = " * " ~ searchResult["displayName"].str; - siteSearchResults ~= siteSearchResultsEntry; + // Build up the create directory request + JSONValue createDirectoryOnlineAPIResponse; + JSONValue newDriveItem = [ + "name": JSONValue(baseName(thisNewPathToCreate)), + "folder": parseJSON("{}") + ]; + + // Submit the creation request + // Fix for 
https://github.com/skilion/onedrive/issues/356 + if (!dryRun) { + try { + // Attempt to create a new folder on the configured parent driveId & parent id + createDirectoryOnlineAPIResponse = createDirectoryOnlineOneDriveApiInstance.createById(parentItem.driveId, parentItem.id, newDriveItem); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(createDirectoryOnlineAPIResponse); + // Log that the directory was created + log.log("Successfully created the remote directory ", thisNewPathToCreate, " on OneDrive"); + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 409) { + // OneDrive API returned a 404 (above) to say the directory did not exist + // but when we attempted to create it, OneDrive responded that it now already exists + log.vlog("OneDrive reported that ", thisNewPathToCreate, " already exists .. OneDrive API race condition"); + return; } else { - // Add, but indicate displayName unavailable, use id - if ("id" in searchResult) { - siteSearchResultsEntry = " * " ~ "Unknown displayName (Data not provided by API), Site ID: " ~ searchResult["id"].str; - siteSearchResults ~= siteSearchResultsEntry; - } else { - // displayName and id unavailable, display in debug log the entry - log.vdebug("Bad SharePoint Data for site: ", searchResult); - } + // some other error from OneDrive was returned - display what it is + log.error("OneDrive generated an error when creating this path: ", thisNewPathToCreate); + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + return; } } + } else { + // Simulate a successful 'directory create' & save it to the dryRun database copy + log.log("Successfully created the remote directory ", thisNewPathToCreate, " on OneDrive"); + // The simulated response has to pass 'makeItem' as part of saveItem + auto fakeResponse = createFakeResponse(thisNewPathToCreate); + // Save item to the database + saveItem(fakeResponse); } - } else { - // not a valid JSON object - 
log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); + + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); return; - } + + } else { - // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response - // to indicate more items are available and provide the request URL for the next page of items. - if ("@odata.nextLink" in siteQuery) { - // Update nextLink to next set of SharePoint library names - nextLink = siteQuery["@odata.nextLink"].str; - log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); - } else break; - } - - // Was the intended target found? - if(!found) { - writeln(); - log.error("ERROR: The requested SharePoint site could not be found. Please check it's name and your permissions to access the site."); - // List all sites returned to assist user - writeln(); - log.log("The following SharePoint site names were returned:"); - foreach (searchResultEntry; siteSearchResults) { - // list the display name that we use to match against the user query - log.log(searchResultEntry); + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(createDirectoryOnlineOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to create a remote directory on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + createDirectoryOnline(thisNewPathToCreate); + } else { + // Re-Try + createDirectoryOnline(thisNewPathToCreate); + } } } - } - - // Create an anonymous read-only shareable link for an existing file on OneDrive - void createShareableLinkForFile(string filePath, bool writeablePermissions) - { - JSONValue onedrivePathDetails; - JSONValue createShareableLinkResponse; - string driveId; - string itemId; - string fileShareLink; - - // Get the path details from OneDrive - try { - onedrivePathDetails = onedrive.getPathDetails(filePath); // Returns a JSON String for the OneDrive Path - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(filePath); 
generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // Requested path could not be found - log.error("ERROR: The requested path to query was not found on OneDrive"); - log.error("ERROR: Cannot create a shareable link for a file that does not exist on OneDrive"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - createShareableLinkForFile(filePath, writeablePermissions); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying request"); - // Retry original request by calling function again to avoid replicating any further error handling - createShareableLinkForFile(filePath, writeablePermissions); - // return back to original call - return; - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - // Was a valid JSON response received? - if (onedrivePathDetails.type() == JSONType.object) { - // valid JSON response for the file was received - // Configure the required variables - driveId = onedrivePathDetails["parentReference"]["driveId"].str; - itemId = onedrivePathDetails["id"].str; - - // What sort of shareable link is required? 
- JSONValue accessScope; - if (writeablePermissions) { - // configure the read-write access scope - accessScope = [ - "type": "edit", - "scope": "anonymous" - ]; - } else { - // configure the read-only access scope (default) - accessScope = [ - "type": "view", - "scope": "anonymous" - ]; - } - - // Create the shareable file link - createShareableLinkResponse = onedrive.createShareableLink(driveId, itemId, accessScope); - if ((createShareableLinkResponse.type() == JSONType.object) && ("link" in createShareableLinkResponse)) { - // Extract the file share link from the JSON response - fileShareLink = createShareableLinkResponse["link"]["webUrl"].str; - writeln("File Shareable Link: ", fileShareLink); - if (writeablePermissions) { - writeln("Shareable Link has read-write permissions - use and provide with caution"); + // If we get to this point - onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, thisNewPathToCreate) generated a 'valid' response .... + // This means that the folder potentially exists online .. which is odd .. as it should not have existed + if (onlinePathData.type() == JSONType.object) { + // A valid object was responded with + if (onlinePathData["name"].str == baseName(thisNewPathToCreate)) { + // OneDrive 'name' matches local path name + if (appConfig.accountType == "business") { + // We are a business account, this existing online folder, could be a Shared Online Folder and is the 'Add shortcut to My files' item + log.vdebug("onlinePathData: ", onlinePathData); + if (isItemRemote(onlinePathData)) { + // The folder is a remote item ... we do not want to create this ... 
+ log.vdebug("Remote Existing Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'"); + log.vdebug("We need to skip this path: ", thisNewPathToCreate); + // Add this path to businessSharedFoldersOnlineToSkip + businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate]; + // no save to database, no online create + return; + } } + log.vlog("The requested directory to create was found on OneDrive - skipping creating the directory: ", thisNewPathToCreate); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(onlinePathData); + return; } else { - // not a valid JSON object - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); + // Normally this would throw an error, however we cant use throw new posixException() + string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", baseName(thisNewPathToCreate), onlinePathData["name"].str); + displayPosixErrorMessage(msg); + log.error("ERROR: Requested directory to create has a 'case-insensitive match' to an existing directory on OneDrive online."); + log.error("ERROR: To resolve, rename this local directory: ", buildNormalizedPath(absolutePath(thisNewPathToCreate))); + log.log("Skipping creating this directory online due to 'case-insensitive match': ", thisNewPathToCreate); + // Add this path to posixViolationPaths + posixViolationPaths ~= [thisNewPathToCreate]; return; } } else { - // not a valid JSON object + // response is not valid JSON, an error was returned from OneDrive log.error("ERROR: There was an error performing this operation on OneDrive"); log.error("ERROR: Increase logging verbosity to assist determining why."); + log.log("Skipping: ", buildNormalizedPath(absolutePath(thisNewPathToCreate))); return; - } + } } - // Query OneDrive 
for file details of a given path - void queryOneDriveForFileDetails(string localFilePath, string syncDir, string outputType) - { - // Query if file is valid locally - if (exists(localFilePath)) { - // File exists locally, does it exist in the database - // Path needs to be relative to sync_dir path - Item item; - string[] distinctDriveIds = itemdb.selectDistinctDriveIds(); - string relativePath = relativePath(localFilePath, syncDir); - bool fileInDB = false; - foreach (searchDriveId; distinctDriveIds) { - if (itemdb.selectByPath(relativePath, searchDriveId, item)) { - // File is in the local database cache - fileInDB = true; - JSONValue fileDetails; - try { - fileDetails = onedrive.getFileDetails(item.driveId, item.id); - } catch (OneDriveException e) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - - // debug output of response - log.vdebug("API Response: ", fileDetails); - - // What sort of response to we generate - // --get-file-link response - if (outputType == "URL") { - if ((fileDetails.type() == JSONType.object) && ("webUrl" in fileDetails)) { - // Valid JSON object - writeln(fileDetails["webUrl"].str); - } - } - - // --modified-by response - if (outputType == "ModifiedBy") { - if ((fileDetails.type() == JSONType.object) && ("lastModifiedBy" in fileDetails)) { - // Valid JSON object - writeln("Last modified: ", fileDetails["lastModifiedDateTime"].str); - writeln("Last modified by: ", fileDetails["lastModifiedBy"]["user"]["displayName"].str); - // if 'email' provided, add this to the output - if ("email" in fileDetails["lastModifiedBy"]["user"]) { - writeln("Email Address: ", fileDetails["lastModifiedBy"]["user"]["email"].str); - } - } - } - } - } - // was path found? 
- if (!fileInDB) { - // File has not been synced with OneDrive - log.error("Path has not been synced with OneDrive: ", localFilePath); - } - } else { - // File does not exist locally - log.error("Path not found on local system: ", localFilePath); + // Test that the online name actually matches the requested local name + void performPosixTest(string localNameToCheck, string onlineName) { + + // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file + // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, + // even though some file systems (such as a POSIX-compliant file system) may consider them as different. + // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. + if (localNameToCheck != onlineName) { + // POSIX Error + // Local item name has a 'case-insensitive match' to an existing item on OneDrive + throw new posixException(localNameToCheck, onlineName); } } - // Query the OneDrive 'drive' to determine if we are 'in sync' or if there are pending changes - void queryDriveForChanges(const(string) path) - { + // Upload new file items as identified + void uploadNewLocalFileItems() { - // Function variables - int validChanges = 0; - long downloadSize = 0; - string driveId; - string folderId; - string deltaLink; - string thisItemId; - string thisItemParentPath; - string syncFolderName; - string syncFolderPath; - string syncFolderChildPath; - JSONValue changes; - JSONValue onedrivePathDetails; - - // Get the path details from OneDrive - try { - onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(path); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // Requested path could not be found - log.error("ERROR: The requested path to query was not found on OneDrive"); - return; - } - - if 
(e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - queryDriveForChanges(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying request"); - // Retry original request by calling function again to avoid replicating any further error handling - queryDriveForChanges(path); - // return back to original call - return; - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } + // Lets deal with the new local items in a batch process + ulong batchSize = appConfig.concurrentThreads; + ulong batchCount = (newLocalFilesToUploadToOneDrive.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; - if(isItemRemote(onedrivePathDetails)){ - // remote changes - driveId = onedrivePathDetails["remoteItem"]["parentReference"]["driveId"].str; // Should give something like 66d53be8a5056eca - folderId = onedrivePathDetails["remoteItem"]["id"].str; // Should give something like BC7D88EC1F539DCF!107 - syncFolderName = onedrivePathDetails["name"].str; - // A remote drive item will not have ["parentReference"]["path"] - syncFolderPath = ""; - syncFolderChildPath = ""; - } else { - driveId = defaultDriveId; - folderId = onedrivePathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101 - syncFolderName = onedrivePathDetails["name"].str; - if (hasParentReferencePath(onedrivePathDetails)) { - syncFolderPath = 
onedrivePathDetails["parentReference"]["path"].str; - syncFolderChildPath = syncFolderPath ~ "/" ~ syncFolderName ~ "/"; - } else { - // root drive item will not have ["parentReference"]["path"] - syncFolderPath = ""; - syncFolderChildPath = ""; - } + foreach (chunk; newLocalFilesToUploadToOneDrive.chunks(batchSize)) { + uploadNewLocalFileItemsInParallel(chunk); + } + } + + // Upload the file batches in parallel + void uploadNewLocalFileItemsInParallel(string[] array) { + + foreach (i, fileToUpload; taskPool.parallel(array)) { + log.vdebug("Upload Thread ", i, " Starting: ", Clock.currTime()); + uploadNewFile(fileToUpload); + log.vdebug("Upload Thread ", i, " Finished: ", Clock.currTime()); } + } + + // Upload a new file to OneDrive + void uploadNewFile(string fileToUpload) { + + // Debug for the moment + log.vdebug("fileToUpload: ", fileToUpload); - // Query Database for the deltaLink - deltaLink = itemdb.getDeltaLink(driveId, folderId); + // These are the details of the item we need to upload + // How much space is remaining on OneDrive + ulong remainingFreeSpaceOnline; + // Did the upload fail? + bool uploadFailed = false; + // Did we skip due to exceeding maximum allowed size? + bool skippedMaxSize = false; + // Did we skip to an exception error? + bool skippedExceptionError = false; + // Is the parent path in the item database? + bool parentPathFoundInDB = false; + // Get this file size + ulong thisFileSize; + // Is there space available online + bool spaceAvailableOnline = false; - const(char)[] idToQuery; - if (driveId == defaultDriveId) { - // The drive id matches our users default drive id - idToQuery = defaultRootId.dup; + // Check the database for the parent path of fileToUpload + Item parentItem; + // What parent path to use? + string parentPath = dirName(fileToUpload); // will be either . 
or something else + if (parentPath == "."){ + // Assume this is a new file in the users configured sync_dir root + // Use client defaults + parentItem.id = appConfig.defaultRootId; // Should give something like 12345ABCDE1234A1!101 + parentItem.driveId = appConfig.defaultDriveId; // Should give something like 12345abcde1234a1 + parentPathFoundInDB = true; } else { - // The drive id does not match our users default drive id - // Potentially the 'path id' we are requesting the details of is a Shared Folder (remote item) - // Use folderId - idToQuery = folderId; + // Query the database using each of the driveId's we are using + foreach (driveId; driveIDsArray) { + // Query the database for this parent path using each driveId + Item dbResponse; + if(itemDB.selectByPath(parentPath, driveId, dbResponse)){ + // parent path was found in the database + parentItem = dbResponse; + parentPathFoundInDB = true; + } + } } - // Query OneDrive changes - try { - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - queryDriveForChanges(path); - // return back to original call - return; - } else { - // OneDrive threw an error - log.vdebug("Error query: changes = onedrive.viewChangesById(driveId, idToQuery, deltaLink)"); - log.vdebug("OneDrive threw an error when querying for these changes:"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("Previous deltaLink: ", deltaLink); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + // If the parent path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty + if ((parentPathFoundInDB) && (parentItem.driveId.empty)) { + // switch to using defaultDriveId + log.log("parentItem.driveId is empty - using defaultDriveId for upload API calls"); + parentItem.driveId = appConfig.defaultDriveId; } - // Are there any changes on OneDrive? - if (count(changes["value"].array) != 0) { - // Were we given a remote path to check if we are in sync for, or the root? - if (path != "/") { - // we were given a directory to check, we need to validate the list of changes against this path only - foreach (item; changes["value"].array) { - // Is this change valid for the 'path' we are checking? 
- if (hasParentReferencePath(item)) { - thisItemId = item["parentReference"]["id"].str; - thisItemParentPath = item["parentReference"]["path"].str; + // Can we read the file - as a permissions issue or actual file corruption will cause a failure + // Resolves: https://github.com/abraunegg/onedrive/issues/113 + if (readLocalFile(fileToUpload)) { + if (parentPathFoundInDB) { + // The local file can be read - so we can read it to attemtp to upload it in this thread + // Get the file size + thisFileSize = getSize(fileToUpload); + // Does this file exceed the maximum filesize for OneDrive + // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329 + if (thisFileSize <= maxUploadFileSize) { + // Is there enough free space on OneDrive when we started this thread, to upload the file to OneDrive? + remainingFreeSpaceOnline = getRemainingFreeSpace(parentItem.driveId); + log.vdebug("Current Available Space Online (Upload Target Drive ID): ", (remainingFreeSpaceOnline / 1024 / 1024), " MB"); + + // When we compare the space online to the total we are trying to upload - is there space online? 
+ ulong calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize; + + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true + + if (remainingFreeSpaceOnline > totalDataToUpload) { + // Space available + spaceAvailableOnline = true; } else { - thisItemId = item["id"].str; - // Is the defaultDriveId == driveId - if (driveId == defaultDriveId){ - // 'root' items will not have ["parentReference"]["path"] - if (isItemRoot(item)){ - thisItemParentPath = ""; + // we need to look more granular + // What was the latest getRemainingFreeSpace() value? + if (appConfig.quotaAvailable) { + // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? + if (calculatedSpaceOnlinePostUpload > 0) { + // Based on this thread action, we beleive that there is space available online to upload - proceed + spaceAvailableOnline = true; + } + } + } + + // Is quota being restricted? + if (appConfig.quotaRestricted) { + // If the upload target drive is not our drive id, then it is a shared folder .. we need to print a space warning message + if (parentItem.driveId != appConfig.defaultDriveId) { + // Different message depending on account type + if (appConfig.accountType == "personal") { + log.vlog("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. 
Space available online cannot be guaranteed."); } else { - thisItemParentPath = item["parentReference"]["path"].str; + log.vlog("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); } } else { - // A remote drive item will not have ["parentReference"]["path"] - thisItemParentPath = ""; + if (appConfig.accountType == "personal") { + log.vlog("WARNING: OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed."); + } else { + log.vlog("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); + } } + // Space available online is being restricted - so we have no way to really know if there is space available online + spaceAvailableOnline = true; } - if ( (thisItemId == folderId) || (canFind(thisItemParentPath, syncFolderChildPath)) || (canFind(thisItemParentPath, folderId)) ){ - // This is a change we want count - validChanges++; - if ((isItemFile(item)) && (hasFileSize(item))) { - downloadSize = downloadSize + item["size"].integer; + // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) + if (spaceAvailableOnline) { + // We need to check that this new local file does not exist on OneDrive + + // Create a new API Instance for this thread and initialise it + OneDriveApi checkFileOneDriveApiInstance; + checkFileOneDriveApiInstance = new OneDriveApi(appConfig); + checkFileOneDriveApiInstance.initialise(); + + JSONValue fileDetailsFromOneDrive; + + // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file + // Do not assume case sensitivity. 
For example, consider the names OSCAR, Oscar, and oscar to be the same, + // even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different. + // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this. + + // In order to upload this file - this query HAS to respond as a 404 - Not Found + + // Does this 'file' already exist on OneDrive? + try { + fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); + // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API + performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str); + + // No 404 or otherwise was triggered, meaning that the file already exists online and passes the POSIX test ... + log.vdebug("fileDetailsFromOneDrive after exist online check: ", fileDetailsFromOneDrive); + // Does the data from online match our local file? + if (performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { + // Save item to the database + saveItem(fileDetailsFromOneDrive); + } + } catch (OneDriveException exception) { + // If we get a 404 .. the file is not online .. this is what we want .. 
file does not exist online + if (exception.httpStatusCode == 404) { + // The file has been checked, client side filtering checked, does not exist online - we need to upload it + log.vdebug("fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it"); + uploadFailed = performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(checkFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to validate file details on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + uploadNewFile(fileToUpload); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } catch (posixException e) { + displayPosixErrorMessage(e.msg); + uploadFailed = true; } - } - } - // Are there any valid changes? - if (validChanges != 0){ - writeln("Selected directory is out of sync with OneDrive"); - if (downloadSize > 0){ - downloadSize = downloadSize / 1000; - writeln("Approximate data to download from OneDrive: ", downloadSize, " KB"); + + // Operations in this thread are done / complete - either upload was done or it failed + checkFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(checkFileOneDriveApiInstance); + } else { + // skip file upload - insufficent space to upload + log.log("Skipping uploading this new file as it exceeds the available free space on OneDrive: ", fileToUpload); + uploadFailed = true; } } else { - writeln("No pending remote changes - selected directory is in sync"); + // Skip file upload - too large + log.log("Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: ", fileToUpload); + uploadFailed = true; } } else { - writeln("Local directory is out of sync with OneDrive"); - foreach (item; changes["value"].array) { - if ((isItemFile(item)) && (hasFileSize(item))) { - downloadSize = downloadSize + item["size"].integer; - } - } - if (downloadSize > 0){ - downloadSize = downloadSize / 1000; - writeln("Approximate data to download from OneDrive: ", downloadSize, " KB"); + // why was the parent path not in 
the database? + if (canFind(posixViolationPaths, parentPath)) { + log.error("ERROR: POSIX 'case-insensitive match' for the parent path which violates the Microsoft OneDrive API namespace convention."); + } else { + log.error("ERROR: Parent path is not in the database or online."); } + log.error("ERROR: Unable to upload this file: ", fileToUpload); + uploadFailed = true; } } else { - writeln("No pending remote changes - in sync"); + // Unable to read local file + log.log("Skipping uploading this file as it cannot be read (file permissions or file corruption): ", fileToUpload); + uploadFailed = true; + } + + // Upload success or failure? + if (uploadFailed) { + // Need to add this to fileUploadFailures to capture at the end + fileUploadFailures ~= fileToUpload; } } - // Create a fake OneDrive response suitable for use with saveItem - JSONValue createFakeResponse(const(string) path) - { - import std.digest.sha; - // Generate a simulated JSON response which can be used - // At a minimum we need: - // 1. eTag - // 2. cTag - // 3. fileSystemInfo - // 4. file or folder. if file, hash of file - // 5. id - // 6. name - // 7. 
parent reference + // Perform the actual upload to OneDrive + bool performNewFileUpload(Item parentItem, string fileToUpload, ulong thisFileSize) { + + // Assume that by default the upload fails + bool uploadFailed = true; - string fakeDriveId = defaultDriveId; - string fakeRootId = defaultRootId; - SysTime mtime = timeLastModified(path).toUTC(); + // OneDrive API Upload Response + JSONValue uploadResponse; - // Need to update the 'fakeDriveId' & 'fakeRootId' with elements from the --dry-run database - // Otherwise some calls to validate objects will fail as the actual driveId being used is invalid - string parentPath = dirName(path); - Item databaseItem; + // Create the OneDriveAPI Upload Instance + OneDriveApi uploadFileOneDriveApiInstance; + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); - if (parentPath != ".") { - // Not a 'root' parent - // For each driveid in the existing driveIDsArray - foreach (searchDriveId; driveIDsArray) { - log.vdebug("FakeResponse: searching database for: ", searchDriveId, " ", parentPath); - if (itemdb.selectByPath(parentPath, searchDriveId, databaseItem)) { - log.vdebug("FakeResponse: Found Database Item: ", databaseItem); - fakeDriveId = databaseItem.driveId; - fakeRootId = databaseItem.id; + // Calculate upload speed + auto uploadStartTime = Clock.currTime(); + + // Is this a dry-run scenario? + if (!dryRun) { + // Not a dry-run situation + // Do we use simpleUpload or create an upload session? 
+ bool useSimpleUpload = false; + if (thisFileSize <= sessionThresholdFileSize) { + useSimpleUpload = true; + } + + // We can only upload zero size files via simpleFileUpload regardless of account type + // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 + // Additionally, only where file size is < 4MB should be uploaded by simpleUpload - everything else should use a session to upload + + if ((thisFileSize == 0) || (useSimpleUpload)) { + try { + // Attempt to upload the zero byte file using simpleUpload for all account types + uploadResponse = uploadFileOneDriveApiInstance.simpleUpload(fileToUpload, parentItem.driveId, parentItem.id, baseName(fileToUpload)); + uploadFailed = false; + log.log("Uploading new file ", fileToUpload, " ... done."); + // Shutdown the API + uploadFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadFileOneDriveApiInstance); + } catch (OneDriveException exception) { + // An error was responded with - what was it + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to upload a new file to OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + performNewFileUpload(parentItem, fileToUpload, thisFileSize); + // Return upload status + return uploadFailed; + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + log.log("Uploading new file ", fileToUpload, " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + + } catch (FileException e) { + // display the error message + log.log("Uploading new file ", fileToUpload, " ... 
failed."); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } else { + // Session Upload for this criteria: + // - Personal Account and file size > 4MB + // - All Business | Office365 | SharePoint files > 0 bytes + JSONValue uploadSessionData; + // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique + // The best way to do this is calculate the CRC32 of the file, and use this as the suffix of the session file we save + string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." ~ computeCRC32(fileToUpload); + + // Attempt to upload the > 4MB file using an upload session for all account types + try { + // Create the Upload Session + uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, fileToUpload, parentItem.driveId, parentItem.id, baseName(fileToUpload), null, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + // An error was responded with - what was it + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to create an upload session on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + log.log("Uploading new file ", fileToUpload, " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + + } catch (FileException e) { + // display the error message + log.log("Uploading new file ", fileToUpload, " ... failed."); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + + // Do we have a valid session URL that we can use ? 
+ if (uploadSessionData.type() == JSONType.object) { + // This is a valid JSON object + bool sessionDataValid = true; + + // Validate that we have the following items which we need + if (!hasUploadURL(uploadSessionData)) { + sessionDataValid = false; + log.vdebug("Session data missing 'uploadUrl'"); + } + + if (!hasNextExpectedRanges(uploadSessionData)) { + sessionDataValid = false; + log.vdebug("Session data missing 'nextExpectedRanges'"); + } + + if (!hasLocalPath(uploadSessionData)) { + sessionDataValid = false; + log.vdebug("Session data missing 'localPath'"); + } + + if (sessionDataValid) { + // We have a valid Upload Session Data we can use + + try { + // Try and perform the upload session + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSize, uploadSessionData, threadUploadSessionFilePath); + + if (uploadResponse.type() == JSONType.object) { + uploadFailed = false; + log.log("Uploading new file ", fileToUpload, " ... done."); + } else { + log.log("Uploading new file ", fileToUpload, " ... failed."); + uploadFailed = true; + } + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to upload a new file via a session to OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + log.log("Uploading new file ", fileToUpload, " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + + } + } else { + // No Upload URL or nextExpectedRanges or localPath .. not a valid JSON we can use + log.vlog("Session data is missing required elements to perform a session upload."); + log.log("Uploading new file ", fileToUpload, " ... failed."); + } + } else { + // Create session Upload URL failed + log.log("Uploading new file ", fileToUpload, " ... 
failed."); } } + } else { + // We are in a --dry-run scenario + uploadResponse = createFakeResponse(fileToUpload); + uploadFailed = false; + log.logAndNotify("Uploading new file ", fileToUpload, " ... done."); } - // real id / eTag / cTag are different format for personal / business account - auto sha1 = new SHA1Digest(); - ubyte[] fakedOneDriveItemValues = sha1.digest(path); - - JSONValue fakeResponse; + // Upload has finished + auto uploadFinishTime = Clock.currTime(); + // If no upload failure, calculate metrics, perform integrity validation + if (!uploadFailed) { + // Upload did not fail ... + auto uploadDuration = uploadFinishTime - uploadStartTime; + log.vdebug("File Size: ", thisFileSize, " Bytes"); + log.vdebug("Upload Duration: ", (uploadDuration.total!"msecs"/1e3), " Seconds"); + auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024); + log.vdebug("Upload Speed: ", uploadSpeed, " Mbps (approx)"); - if (isDir(path)) { - // path is a directory - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(accountType), - "id": JSONValue(fakeRootId) - ]), - "folder": JSONValue("") - ]; - } else { - // path is a file - // compute file hash - both business and personal responses use quickXorHash - string quickXorHash = computeQuickXorHash(path); - - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": 
mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(accountType), - "id": JSONValue(fakeRootId) - ]), - "file": JSONValue([ - "hashes":JSONValue([ - "quickXorHash": JSONValue(quickXorHash) - ]) - - ]) - ]; - } + // OK as the upload did not fail, we need to save the response from OneDrive, but it has to be a valid JSON response + if (uploadResponse.type() == JSONType.object) { + // check if the path still exists locally before we try to set the file times online - as short lived files, whilst we uploaded it - it may not exist locally already + if (exists(fileToUpload)) { + if (!dryRun) { + // Check the integrity of the uploaded file, if the local file still exists + performUploadIntegrityValidationChecks(uploadResponse, fileToUpload, thisFileSize); - log.vdebug("Generated Fake OneDrive Response: ", fakeResponse); - return fakeResponse; + // Update the file modified time on OneDrive and save item details to database + // Update the item's metadata on OneDrive + SysTime mtime = timeLastModified(fileToUpload).toUTC(); + mtime.fracSecs = Duration.zero; + string newFileId = uploadResponse["id"].str; + string newFileETag = uploadResponse["eTag"].str; + // Attempt to update the online date time stamp based on our local data + uploadLastModifiedTime(parentItem.driveId, newFileId, mtime, newFileETag); + } + } else { + // will be removed in different event!
+ log.log("File disappeared locally after upload: ", fileToUpload); + } + } else { + // Log that an invalid JSON object was returned + log.vdebug("uploadFileOneDriveApiInstance.simpleUpload or session.upload call returned an invalid JSON Object from the OneDrive API"); + } + } + + // Return upload status + return uploadFailed; } - void handleOneDriveThrottleRequest() - { - // If OneDrive sends a status code 429 then this function will be used to process the Retry-After response header which contains the value by which we need to wait - log.vdebug("Handling a OneDrive HTTP 429 Response Code (Too Many Requests)"); - // Read in the Retry-After HTTP header as set and delay as per this value before retrying the request - auto retryAfterValue = onedrive.getRetryAfterValue(); - log.vdebug("Using Retry-After Value = ", retryAfterValue); + // Create the OneDrive Upload Session + JSONValue createSessionFileUpload(OneDriveApi activeOneDriveApiInstance, string fileToUpload, string parentDriveId, string parentId, string filename, string eTag, string threadUploadSessionFilePath) { - // HTTP request returned status code 429 (Too Many Requests) - // https://github.com/abraunegg/onedrive/issues/133 - // https://github.com/abraunegg/onedrive/issues/815 + // Upload file via a OneDrive API session + JSONValue uploadSession; - ulong delayBeforeRetry = 0; - if (retryAfterValue != 0) { - // Use the HTTP Response Header Value - delayBeforeRetry = retryAfterValue; + // Calculate modification time + SysTime localFileLastModifiedTime = timeLastModified(fileToUpload).toUTC(); + localFileLastModifiedTime.fracSecs = Duration.zero; + + // Construct the fileSystemInfo JSON component needed to create the Upload Session + JSONValue fileSystemInfo = [ + "item": JSONValue([ + "@microsoft.graph.conflictBehavior": JSONValue("replace"), + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": localFileLastModifiedTime.toISOExtString() + ]) + ]) + ]; + + // Try to create the upload session for this file 
+ uploadSession = activeOneDriveApiInstance.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo); + + if (uploadSession.type() == JSONType.object) { + // a valid session object was created + if ("uploadUrl" in uploadSession) { + // Add the file path we are uploading to this JSON Session Data + uploadSession["localPath"] = fileToUpload; + // Save this session + saveSessionFile(threadUploadSessionFilePath, uploadSession); + } } else { - // Use a 120 second delay as a default given header value was zero - // This value is based on log files and data when determining correct process for 429 response handling - delayBeforeRetry = 120; - // Update that we are over-riding the provided value with a default - log.vdebug("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: ", delayBeforeRetry); + // no valid session was created + log.vlog("Creation of OneDrive API Upload Session failed."); + // return upload() will return a JSONValue response, create an empty JSONValue response to return + uploadSession = null; } + // Return the JSON + return uploadSession; + } + + // Save the session upload data + void saveSessionFile(string threadUploadSessionFilePath, JSONValue uploadSessionData) { - // Sleep thread as per request - log.log("Thread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); - log.log("Sleeping for ", delayBeforeRetry, " seconds"); - Thread.sleep(dur!"seconds"(delayBeforeRetry)); + try { + std.file.write(threadUploadSessionFilePath, uploadSessionData.toString()); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + + // Perform the upload of file via the Upload Session that was created + JSONValue performSessionFileUpload(OneDriveApi activeOneDriveApiInstance, ulong thisFileSize, JSONValue uploadSessionData, string threadUploadSessionFilePath) { + + // Response for upload + JSONValue 
uploadResponse; + + // Session JSON needs to contain valid elements + // Get the offset details + ulong fragmentSize = 10 * 2^^20; // 10 MiB + ulong fragmentCount = 0; + ulong fragSize = 0; + ulong offset = uploadSessionData["nextExpectedRanges"][0].str.splitter('-').front.to!ulong; + size_t iteration = (roundTo!int(double(thisFileSize)/double(fragmentSize)))+1; + Progress p = new Progress(iteration); + p.title = "Uploading"; + + // Initialise the upload progress bar at 0% + p.next(); + + // Start the session upload using the active API instance for this thread + while (true) { + fragmentCount++; + log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration); + p.next(); + log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " thisFileSize: ", thisFileSize ); + fragSize = fragmentSize < thisFileSize - offset ? fragmentSize : thisFileSize - offset; + log.vdebugNewLine("Using fragSize: ", fragSize); + + // fragSize must not be a negative value + if (fragSize < 0) { + // Session upload will fail + // not a JSON object - fragment upload failed + log.vlog("File upload session failed - invalid calculation of fragment size"); + if (exists(threadUploadSessionFilePath)) { + remove(threadUploadSessionFilePath); + } + // set uploadResponse to null as error + uploadResponse = null; + return uploadResponse; + } + + // If the resume upload fails, we need to check for a return code here + try { + uploadResponse = activeOneDriveApiInstance.uploadFragment( + uploadSessionData["uploadUrl"].str, + uploadSessionData["localPath"].str, + offset, + fragSize, + thisFileSize + ); + } catch (OneDriveException exception) { + // if a 100 uploadResponse is generated, continue + if (exception.httpStatusCode == 100) { + continue; + } + // There was an error uploadResponse from OneDrive when uploading the file fragment + + // Handle transient errors: + // 408 - Request Time Out + // 429 - Too Many Requests + // 503 - Service Unavailable + // 504 - Gateway Timeout + + // HTTP
request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle 'HTTP request returned status code 429 (Too Many Requests)' first + log.vdebug("Fragment upload failed - received throttle request uploadResponse from OneDrive"); + if (exception.httpStatusCode == 429) { + auto retryAfterValue = activeOneDriveApiInstance.getRetryAfterValue(); + log.vdebug("Using Retry-After Value = ", retryAfterValue); + // Sleep thread as per request + log.log("\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); + log.log("Sleeping for ", retryAfterValue, " seconds"); + Thread.sleep(dur!"seconds"(retryAfterValue)); + log.log("Retrying fragment upload"); + } else { + // Handle 408, 503 and 504 + auto errorArray = splitLines(exception.msg); + auto retryAfterValue = 30; + log.log("\nThread sleeping due to '", errorArray[0], "' - retrying applicable request in 30 seconds"); + log.log("Sleeping for ", retryAfterValue, " seconds"); + Thread.sleep(dur!"seconds"(retryAfterValue)); + log.log("Retrying fragment upload"); + } + } else { + // insert a new line as well, so that the below error is inserted on the console in the right location + log.vlog("\nFragment upload failed - received an exception response from OneDrive API"); + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + // retry fragment upload in case error is transient + log.vlog("Retrying fragment upload"); + } + + try { + uploadResponse = activeOneDriveApiInstance.uploadFragment( + uploadSessionData["uploadUrl"].str, + uploadSessionData["localPath"].str, + offset, + fragSize, + thisFileSize + ); + } catch (OneDriveException e) { + // OneDrive threw another error on retry + log.vlog("Retry to upload fragment failed"); + // display what the error is + displayOneDriveErrorMessage(e.msg, 
getFunctionName!({})); + // set uploadResponse to null as the fragment upload was in error twice + uploadResponse = null; + } catch (std.exception.ErrnoException e) { + // There was a file system error - display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return uploadResponse; + } + } + + // was the fragment uploaded without issue? + if (uploadResponse.type() == JSONType.object){ + offset += fragmentSize; + if (offset >= thisFileSize) break; + // update the uploadSessionData details + uploadSessionData["expirationDateTime"] = uploadResponse["expirationDateTime"]; + uploadSessionData["nextExpectedRanges"] = uploadResponse["nextExpectedRanges"]; + saveSessionFile(threadUploadSessionFilePath, uploadSessionData); + } else { + // not a JSON object - fragment upload failed + log.vlog("File upload session failed - invalid response from OneDrive API"); + if (exists(threadUploadSessionFilePath)) { + remove(threadUploadSessionFilePath); + } + // set uploadResponse to null as error + uploadResponse = null; + return uploadResponse; + } + } + + // upload complete + p.next(); + writeln(); + if (exists(threadUploadSessionFilePath)) { + remove(threadUploadSessionFilePath); + } + + // Return the session upload response + return uploadResponse; + } + + // Delete an item on OneDrive + void uploadDeletedItem(Item itemToDelete, string path) { + + // Are we in a situation where we HAVE to keep the data online - do not delete the remote object + if (noRemoteDelete) { + if ((itemToDelete.type == ItemType.dir)) { + // Do not process remote directory delete + log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); + } else { + // Do not process remote file delete + log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); + } + } else { + // Process the delete - delete the object online + log.log("Deleting item from OneDrive: ", path); + bool flagAsBigDelete = false; + + Item[] 
children; + ulong itemsToDelete; + + if ((itemToDelete.type == ItemType.dir)) { + // Query the database - how many objects will this remove? + children = getChildren(itemToDelete.driveId, itemToDelete.id); + // Count the returned items + the original item (1) + itemsToDelete = count(children) + 1; + log.vdebug("Number of items online to delete: ", itemsToDelete); + } else { + itemsToDelete = 1; + } + + // A local delete of a file|folder when using --monitor will issue a inotify event, which will trigger the local & remote data immediately be deleted + // The user may also be --sync process, so we are checking if something was deleted between application use + if (itemsToDelete >= appConfig.getValueLong("classify_as_big_delete")) { + // A big delete has been detected + flagAsBigDelete = true; + if (!appConfig.getValueBool("force")) { + log.error("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. Exiting client to preserve data on OneDrive"); + log.error("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value"); + // Must exit here to preserve data on online + exit(-1); + } + } + + // Are we in a --dry-run scenario? + if (!dryRun) { + // We are not in a dry run scenario + log.vdebug("itemToDelete: ", itemToDelete); + + // Create new OneDrive API Instance + OneDriveApi uploadDeletedItemOneDriveApiInstance; + uploadDeletedItemOneDriveApiInstance = new OneDriveApi(appConfig); + uploadDeletedItemOneDriveApiInstance.initialise(); + + // what item are we trying to delete? 
+ log.vdebug("Attempting to delete this single item id: ", itemToDelete.id, " from drive: ", itemToDelete.driveId); + try { + // perform the delete via the default OneDrive API instance + uploadDeletedItemOneDriveApiInstance.deleteById(itemToDelete.driveId, itemToDelete.id); + // Shutdown API + uploadDeletedItemOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadDeletedItemOneDriveApiInstance); + } catch (OneDriveException e) { + if (e.httpStatusCode == 404) { + // item.id, item.eTag could not be found on the specified driveId + log.vlog("OneDrive reported: The resource could not be found to be deleted."); + } + } + + // Delete the reference in the local database + itemDB.deleteById(itemToDelete.driveId, itemToDelete.id); + if (itemToDelete.remoteId != null) { + // If the item is a remote item, delete the reference in the local database + itemDB.deleteById(itemToDelete.remoteDriveId, itemToDelete.remoteId); + } + } else { + // log that this is a dry-run activity + log.log("dry run - no delete activity"); + } + } + } + + // Get the children of an item id from the database + Item[] getChildren(string driveId, string id) { + + Item[] children; + children ~= itemDB.selectChildren(driveId, id); + foreach (Item child; children) { + if (child.type != ItemType.file) { + // recursively get the children of this child + children ~= getChildren(child.driveId, child.id); + } + } + return children; + } + + // Perform a 'reverse' delete of all child objects on OneDrive + void performReverseDeletionOfOneDriveItems(Item[] children, Item itemToDelete) { + + // Log what is happening + log.vdebug("Attempting a reverse delete of all child objects from OneDrive"); + + // Create a new API Instance for this thread and initialise it + OneDriveApi performReverseDeletionOneDriveApiInstance; + performReverseDeletionOneDriveApiInstance = new OneDriveApi(appConfig); + performReverseDeletionOneDriveApiInstance.initialise(); + + foreach_reverse (Item child; 
children) { + // Log the action + log.vdebug("Attempting to delete this child item id: ", child.id, " from drive: ", child.driveId); + // perform the delete via the default OneDrive API instance + performReverseDeletionOneDriveApiInstance.deleteById(child.driveId, child.id, child.eTag); + // delete the child reference in the local database + itemDB.deleteById(child.driveId, child.id); + } + // Log the action + log.vdebug("Attempting to delete this parent item id: ", itemToDelete.id, " from drive: ", itemToDelete.driveId); + // Perform the delete via the default OneDrive API instance + performReverseDeletionOneDriveApiInstance.deleteById(itemToDelete.driveId, itemToDelete.id, itemToDelete.eTag); + // Shutdown API instance + performReverseDeletionOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(performReverseDeletionOneDriveApiInstance); + } + + // Create a fake OneDrive response suitable for use with saveItem + JSONValue createFakeResponse(const(string) path) { + + import std.digest.sha; + // Generate a simulated JSON response which can be used + // At a minimum we need: + // 1. eTag + // 2. cTag + // 3. fileSystemInfo + // 4. file or folder. if file, hash of file + // 5. id + // 6. name + // 7. 
parent reference + + string fakeDriveId = appConfig.defaultDriveId; + string fakeRootId = appConfig.defaultRootId; + SysTime mtime = timeLastModified(path).toUTC(); + + // Need to update the 'fakeDriveId' & 'fakeRootId' with elements from the --dry-run database + // Otherwise some calls to validate objects will fail as the actual driveId being used is invalid + string parentPath = dirName(path); + Item databaseItem; + + if (parentPath != ".") { + // Not a 'root' parent + // For each driveid in the existing driveIDsArray + foreach (searchDriveId; driveIDsArray) { + log.vdebug("FakeResponse: searching database for: ", searchDriveId, " ", parentPath); + if (itemDB.selectByPath(parentPath, searchDriveId, databaseItem)) { + log.vdebug("FakeResponse: Found Database Item: ", databaseItem); + fakeDriveId = databaseItem.driveId; + fakeRootId = databaseItem.id; + } + } + } + + // real id / eTag / cTag are different format for personal / business account + auto sha1 = new SHA1Digest(); + ubyte[] fakedOneDriveItemValues = sha1.digest(path); + + JSONValue fakeResponse; + + if (isDir(path)) { + // path is a directory + fakeResponse = [ + "id": JSONValue(toHexString(fakedOneDriveItemValues)), + "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + "parentReference": JSONValue([ + "driveId": JSONValue(fakeDriveId), + "driveType": JSONValue(appConfig.accountType), + "id": JSONValue(fakeRootId) + ]), + "folder": JSONValue("") + ]; + } else { + // path is a file + // compute file hash - both business and personal responses use quickXorHash + string quickXorHash = computeQuickXorHash(path); + + fakeResponse = [ + "id": JSONValue(toHexString(fakedOneDriveItemValues)), + "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "eTag": 
JSONValue(toHexString(fakedOneDriveItemValues)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + "parentReference": JSONValue([ + "driveId": JSONValue(fakeDriveId), + "driveType": JSONValue(appConfig.accountType), + "id": JSONValue(fakeRootId) + ]), + "file": JSONValue([ + "hashes":JSONValue([ + "quickXorHash": JSONValue(quickXorHash) + ]) + + ]) + ]; + } + + log.vdebug("Generated Fake OneDrive Response: ", fakeResponse); + return fakeResponse; + } + + // Save JSON item details into the item database + void saveItem(JSONValue jsonItem) { + + // jsonItem has to be a valid object + if (jsonItem.type() == JSONType.object) { + // Check if the response JSON has an 'id', otherwise makeItem() fails with 'Key not found: id' + if (hasId(jsonItem)) { + // Are we in a --upload-only & --remove-source-files scenario? + // We do not want to add the item to the database in this situation as there is no local reference to the file post file deletion + // If the item is a directory, we need to add this to the DB, if this is a file, we dont add this, the parent path is not in DB, thus any new files in this directory are not added + if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) { + // Log that we skipping adding item to the local DB and the reason why + log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured"); + } else { + // What is the JSON item we are trying to create a DB record with? + log.vdebug("saveItem - creating DB item from this JSON: ", jsonItem); + // Takes a JSON input and formats to an item which can be used by the database + Item item = makeItem(jsonItem); + + // Is this JSON item a 'root' item? 
+ if ((isItemRoot(jsonItem)) && (item.name == "root")) { + log.vdebug("Updating DB Item object with correct values as this is a 'root' object"); + item.parentId = null; // ensures that this database entry has no parent + // Check for parentReference + if (hasParentReference(jsonItem)) { + // Set the correct item.driveId + log.vdebug("ROOT JSON Item HAS parentReference .... setting item.driveId = jsonItem['parentReference']['driveId'].str"); + item.driveId = jsonItem["parentReference"]["driveId"].str; + } + + // We only should be adding our account 'root' to the database, not shared folder 'root' items + if (item.driveId != appConfig.defaultDriveId) { + // Shared Folder drive 'root' object .. we dont want this item + log.vdebug("NOT adding 'remote root' object to database: ", item); + return; + } + } + + // Add to the local database + log.vdebug("Adding to database: ", item); + itemDB.upsert(item); + + // If we have a remote drive ID, add this to our list of known drive id's + if (!item.remoteDriveId.empty) { + // Keep the driveIDsArray with unique entries only + if (!canFind(driveIDsArray, item.remoteDriveId)) { + // Add this drive id to the array to search with + driveIDsArray ~= item.remoteDriveId; + } + } + } + } else { + // log error + log.error("ERROR: OneDrive response missing required 'id' element"); + log.error("ERROR: ", jsonItem); + } + } else { + // log error + log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); + log.error("ERROR: Increase logging verbosity to assist determining why."); + } + } + + // Wrapper function for makeDatabaseItem so we can check to ensure that the item has the required hashes + Item makeItem(JSONValue onedriveJSONItem) { + + // Make the DB Item from the JSON data provided + Item newDatabaseItem = makeDatabaseItem(onedriveJSONItem); + + // Is this a 'file' item that has not been deleted? 
Deleted items have no hash + if ((newDatabaseItem.type == ItemType.file) && (!isItemDeleted(onedriveJSONItem))) { + // Does this item have a file size attribute? + if (hasFileSize(onedriveJSONItem)) { + // Is the file size greater than 0? + if (onedriveJSONItem["size"].integer > 0) { + // Does the DB item have any hashes as per the API provided JSON data? + if ((newDatabaseItem.quickXorHash.empty) && (newDatabaseItem.sha256Hash.empty)) { + // Odd .. there is no hash for this item .. why is that? + // Is there a 'file' JSON element? + if ("file" in onedriveJSONItem) { + // Microsoft OneDrive OneNote objects will report as files but have 'application/msonenote' and 'application/octet-stream' as mime types + if ((isMicrosoftOneNoteMimeType1(onedriveJSONItem)) || (isMicrosoftOneNoteMimeType2(onedriveJSONItem))) { + // Debug log output that this is a potential OneNote object + log.vdebug("This item is potentially an associated Microsoft OneNote Object Item"); + } else { + // Not a Microsoft OneNote Mime Type Object .. + string apiWarningMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: "; + // This is computationally expensive .. but we are only doing this if there are no hashses provided + bool parentInDatabase = itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.parentId); + // Is the parent id in the database? + if (parentInDatabase) { + // This is again computationally expensive .. calculate this item path to advise the user the actual path of this item that has no hash + string newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; + log.log(apiWarningMessage, newItemPath); + } else { + // Parent is not in the database .. why? + // Check if the parent item had been skipped .. + if (newDatabaseItem.parentId in skippedItems) { + log.vdebug(apiWarningMessage, "newDatabaseItem.parentId listed within skippedItems"); + } else { + // Use the item ID .. 
there is no other reference available, parent is not being skipped, so we should have been able to calculate this - but we could not + log.log(apiWarningMessage, newDatabaseItem.id); + } + } + } + } + } + } else { + // zero file size + log.vdebug("This item file is zero size - potentially no hash provided by the OneDrive API"); + } + } + } + + // Return the new database item + return newDatabaseItem; + } + + // Print the fileDownloadFailures and fileUploadFailures arrays if they are not empty + void displaySyncFailures() { + + // Were there any file download failures? + if (!fileDownloadFailures.empty) { + // There are download failures ... + log.log("\nFailed items to download from OneDrive: ", fileDownloadFailures.length); + foreach(failedFileToDownload; fileDownloadFailures) { + // List the detail of the item that failed to download + log.logAndNotify("Failed to download: ", failedFileToDownload); + + // Is this failed item in the DB? It should not be .. + Item downloadDBItem; + // Need to check all driveid's we know about, not just the defaultDriveId + foreach (searchDriveId; driveIDsArray) { + if (itemDB.selectByPath(failedFileToDownload, searchDriveId, downloadDBItem)) { + // item was found in the DB + log.error("ERROR: Failed Download Path found in database, must delete this item from the database .. it should not be in there if it failed to download"); + // Process the database entry removal. In a --dry-run scenario, this is being done against a DB copy + itemDB.deleteById(downloadDBItem.driveId, downloadDBItem.id); + if (downloadDBItem.remoteDriveId != null) { + // delete the linked remote folder + itemDB.deleteById(downloadDBItem.remoteDriveId, downloadDBItem.remoteId); + } + } + } + } + // Set the flag + syncFailures = true; + } + + // Were there any file upload failures? + if (!fileUploadFailures.empty) { + // There are download failures ... 
+ log.log("\nFailed items to upload to OneDrive: ", fileUploadFailures.length); + foreach(failedFileToUpload; fileUploadFailures) { + // List the path of the item that failed to upload + log.logAndNotify("Failed to upload: ", failedFileToUpload); + + // Is this failed item in the DB? It should not be .. + Item uploadDBItem; + // Need to check all driveid's we know about, not just the defaultDriveId + foreach (searchDriveId; driveIDsArray) { + if (itemDB.selectByPath(failedFileToUpload, searchDriveId, uploadDBItem)) { + // item was found in the DB + log.error("ERROR: Failed Upload Path found in database, must delete this item from the database .. it should not be in there if it failed to upload"); + // Process the database entry removal. In a --dry-run scenario, this is being done against a DB copy + itemDB.deleteById(uploadDBItem.driveId, uploadDBItem.id); + if (uploadDBItem.remoteDriveId != null) { + // delete the linked remote folder + itemDB.deleteById(uploadDBItem.remoteDriveId, uploadDBItem.remoteId); + } + } + } + } + // Set the flag + syncFailures = true; + } + } + + // Generate a /delta compatible response - for use when we cant actually use /delta + // This is required when the application is configured to use National Azure AD deployments as these do not support /delta queries + // The same technique can also be used when we are using --single-directory. 
The parent objects up to the single directory target can be added, + // then once the target of the --single-directory request is hit, all of the children of that path can be queried, giving a much more focused + // JSON response which can then be processed, negating the need to continuously traverse the tree and 'exclude' items + JSONValue generateDeltaResponse(string pathToQuery = null) { + + // JSON value which will be responded with + JSONValue selfGeneratedDeltaResponse; + + // Function variables + Item searchItem; + JSONValue rootData; + JSONValue driveData; + JSONValue pathData; + JSONValue topLevelChildren; + JSONValue[] childrenData; + string nextLink; + + // Was a path to query passed in? + if (pathToQuery.empty) { + // Will query for the 'root' + pathToQuery = "."; + } + + // Create new OneDrive API Instance + OneDriveApi generateDeltaResponseOneDriveApiInstance; + generateDeltaResponseOneDriveApiInstance = new OneDriveApi(appConfig); + generateDeltaResponseOneDriveApiInstance.initialise(); + + if (!singleDirectoryScope) { + // In a --resync scenario, there is no DB data to query, so we have to query the OneDrive API here to get relevant details + try { + // Query the OneDrive API + pathData = generateDeltaResponseOneDriveApiInstance.getPathDetails(pathToQuery); + // Is the path on OneDrive local or remote to our account drive id? 
+ if (isItemRemote(pathData)) { + // The path we are seeking is remote to our account drive id + searchItem.driveId = pathData["remoteItem"]["parentReference"]["driveId"].str; + searchItem.id = pathData["remoteItem"]["id"].str; + } else { + // The path we are seeking is local to our account drive id + searchItem.driveId = pathData["parentReference"]["driveId"].str; + searchItem.id = pathData["id"].str; + } + } catch (OneDriveException e) { + // Display error message + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // Must exit here + generateDeltaResponseOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(generateDeltaResponseOneDriveApiInstance); + exit(-1); + } + } else { + // When setSingleDirectoryScope() was called, the following were set to the correct items, even if the path was remote: + // - singleDirectoryScopeDriveId + // - singleDirectoryScopeItemId + // Reuse these prior set values + searchItem.driveId = singleDirectoryScopeDriveId; + searchItem.id = singleDirectoryScopeItemId; + } + + // Before we get any data from the OneDrive API, flag any child object in the database as out-of-sync for this driveId & and object id + // Downgrade ONLY files associated with this driveId and idToQuery + log.vdebug("Downgrading all children for this searchItem.driveId (" ~ searchItem.driveId ~ ") and searchItem.id (" ~ searchItem.id ~ ") to an out-of-sync state"); + auto drivePathChildren = getChildren(searchItem.driveId, searchItem.id); + if (count(drivePathChildren) > 0) { + // Children to process and flag as out-of-sync + foreach (drivePathChild; drivePathChildren) { + // Flag any object in the database as out-of-sync for this driveId & and object id + log.vdebug("Downgrading item as out-of-sync: ", drivePathChild.id); + itemDB.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id); + } + } + + // Get drive details for the provided driveId + try { + driveData = 
generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id); + } catch (OneDriveException exception) { + log.vdebug("driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id) generated a OneDriveException"); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query path details on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + generateDeltaResponse(pathToQuery); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + + // Was a valid JSON response for 'driveData' provided? + if (driveData.type() == JSONType.object) { + + // Dynamic output for a non-verbose run so that the user knows something is happening + if (log.verbose == 0) { + if (!appConfig.surpressLoggingOutput) { + log.fileOnly("Fetching items from the OneDrive API for Drive ID: ", searchItem.driveId); + // Use the dots to show the application is 'doing something' + write("Fetching items from the OneDrive API for Drive ID: ", searchItem.driveId, " ."); + } + } else { + log.vlog("Generating a /delta response from the OneDrive API for Drive ID: ", searchItem.driveId); + } + + // Process this initial JSON response + if (!isItemRoot(driveData)) { + // Get root details for the provided driveId + try { + rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId); + } catch (OneDriveException exception) { + log.vdebug("rootData = onedrive.getDriveIdRoot(searchItem.driveId) generated a OneDriveException"); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). 
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query drive root details on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.log("Retrying Query: rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId)"); + rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + // Add driveData JSON data to array + log.vlog("Adding OneDrive root details for processing"); + childrenData ~= rootData; + } + + // Add driveData JSON data to array + log.vlog("Adding OneDrive folder details for processing"); + childrenData ~= driveData; + } else { + // driveData is an invalid JSON object + writeln("CODING TO DO: The query of OneDrive API to getPathDetailsById generated an invalid JSON response - thus we cant build our own /delta simulated response ... 
how to handle?"); + // Must exit here + generateDeltaResponseOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(generateDeltaResponseOneDriveApiInstance); + exit(-1); + } + + // For each child object, query the OneDrive API + for (;;) { + // query top level children + try { + topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink); + } catch (OneDriveException exception) { + // OneDrive threw an error + log.vdebug("------------------------------------------------------------------"); + log.vdebug("Query Error: topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)"); + log.vdebug("driveId: ", searchItem.driveId); + log.vdebug("idToQuery: ", searchItem.id); + log.vdebug("nextLink: ", nextLink); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)"); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query OneDrive top level drive children on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug("generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink) previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + //log.vdebug("Retrying Query: generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)"); + //topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink); + + log.vdebug("Retrying Function: ", thisFunctionName); + generateDeltaResponse(pathToQuery); + + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + + // process top level children + log.vlog("Adding ", count(topLevelChildren["value"].array), " OneDrive items for processing from the OneDrive 'root' folder"); + foreach (child; topLevelChildren["value"].array) { + // Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway. + // This avoids needless calls to the OneDrive API, and potentially speeds up this process. + if (!checkJSONAgainstClientSideFiltering(child)) { + // add this child to the array of objects + childrenData ~= child; + // is this child a folder? + if (isItemFolder(child)) { + // We have to query this folders children if childCount > 0 + if (child["folder"]["childCount"].integer > 0){ + // This child folder has children + string childIdToQuery = child["id"].str; + string childDriveToQuery = child["parentReference"]["driveId"].str; + auto childParentPath = child["parentReference"]["path"].str.split(":"); + string folderPathToScan = childParentPath[1] ~ "/" ~ child["name"].str; + + string pathForLogging; + // Are we in a --single-directory situation? 
If we are, the path we are using for logging needs to use the input path as a base + if (singleDirectoryScope) { + pathForLogging = appConfig.getValueString("single_directory") ~ "/" ~ child["name"].str; + } else { + pathForLogging = child["name"].str; + } + + // Query the children of this item + JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, pathForLogging); + foreach (grandChild; grandChildrenData.array) { + // add the grandchild to the array + childrenData ~= grandChild; + } + } + } + } + } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. + if ("@odata.nextLink" in topLevelChildren) { + // Update nextLink to next changeSet bundle + log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); + nextLink = topLevelChildren["@odata.nextLink"].str; + } else break; + } + + if (log.verbose == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + writeln(); + } + } + + // Craft response from all returned JSON elements + selfGeneratedDeltaResponse = [ + "@odata.context": JSONValue("https://graph.microsoft.com/v1.0/$metadata#Collection(driveItem)"), + "value": JSONValue(childrenData.array) + ]; + + // Shutdown API + generateDeltaResponseOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(generateDeltaResponseOneDriveApiInstance); + + // Return the generated JSON response + return selfGeneratedDeltaResponse; + } + + // Query the OneDrive API for the specified child id for any children objects + JSONValue[] queryForChildren(string driveId, string idToQuery, string childParentPath, string pathForLogging) { + + // function variables + JSONValue thisLevelChildren; + JSONValue[] thisLevelChildrenData; + string nextLink; + + // Create new OneDrive 
API Instance + OneDriveApi queryChildrenOneDriveApiInstance; + queryChildrenOneDriveApiInstance = new OneDriveApi(appConfig); + queryChildrenOneDriveApiInstance.initialise(); + + for (;;) { + // query this level children + try { + thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); + } catch (OneDriveException exception) { + + writeln("CODING TO DO: EXCEPTION HANDLING NEEDED: thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)"); + + } + + if (log.verbose == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + write("."); + } + } + + // Was a valid JSON response for 'thisLevelChildren' provided? + if (thisLevelChildren.type() == JSONType.object) { + // process this level children + if (!childParentPath.empty) { + // We dont use childParentPath to log, as this poses an information leak risk. + // The full parent path of the child, as per the JSON might be: + // /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder + // But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged. + // Plus, the application output now shows accuratly what is being shared - so that is a good thing. + log.vlog("Adding ", count(thisLevelChildren["value"].array), " OneDrive items for processing from ", pathForLogging); + } + foreach (child; thisLevelChildren["value"].array) { + // Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway. + // This avoids needless calls to the OneDrive API, and potentially speeds up this process. + if (!checkJSONAgainstClientSideFiltering(child)) { + // add this child to the array of objects + thisLevelChildrenData ~= child; + // is this child a folder? 
+ if (isItemFolder(child)){ + // We have to query this folders children if childCount > 0 + if (child["folder"]["childCount"].integer > 0){ + // This child folder has children + string childIdToQuery = child["id"].str; + string childDriveToQuery = child["parentReference"]["driveId"].str; + auto grandchildParentPath = child["parentReference"]["path"].str.split(":"); + string folderPathToScan = grandchildParentPath[1] ~ "/" ~ child["name"].str; + string newLoggingPath = pathForLogging ~ "/" ~ child["name"].str; + JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, newLoggingPath); + foreach (grandChild; grandChildrenData.array) { + // add the grandchild to the array + thisLevelChildrenData ~= grandChild; + } + } + } + } + } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. + if ("@odata.nextLink" in thisLevelChildren) { + // Update nextLink to next changeSet bundle + nextLink = thisLevelChildren["@odata.nextLink"].str; + log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); + } else break; + + } else { + // Invalid JSON response when querying this level children + log.vdebug("INVALID JSON response when attempting a retry of parent function - queryForChildren(driveId, idToQuery, childParentPath, pathForLogging)"); + // retry thisLevelChildren = queryThisLevelChildren + log.vdebug("Thread sleeping for an additional 30 seconds"); + Thread.sleep(dur!"seconds"(30)); + log.vdebug("Retry this call thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)"); + thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); + } + } + + // Shutdown API instance + queryChildrenOneDriveApiInstance.shutdown(); + // Free object and memory + 
object.destroy(queryChildrenOneDriveApiInstance); + + // return response + return thisLevelChildrenData; + } + + // Query the OneDrive API for the child objects for this element + JSONValue queryThisLevelChildren(string driveId, string idToQuery, string nextLink, OneDriveApi queryChildrenOneDriveApiInstance) { + + // function variables + JSONValue thisLevelChildren; + + // query children + try { + // attempt API call + log.vdebug("Attempting Query: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)"); + thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink); + log.vdebug("Query 'thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)' performed successfully"); + } catch (OneDriveException exception) { + // OneDrive threw an error + log.vdebug("------------------------------------------------------------------"); + log.vdebug("Query Error: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)"); + log.vdebug("driveId: ", driveId); + log.vdebug("idToQuery: ", idToQuery); + log.vdebug("nextLink: ", nextLink); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(queryChildrenOneDriveApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query OneDrive drive item children - retrying applicable request in 30 seconds"); + log.vdebug("thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); + + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + + // return response + return thisLevelChildren; + } + + // Traverses the provided path online, via the OneDrive API, following correct parent driveId and itemId elements across the account + // to find if this full path exists. If this path exists online, the last item in the object path will be returned as a full JSON item. 
+ // + // If the createPathIfMissing = false + no path exists online, a null invalid JSON item will be returned. + // If the createPathIfMissing = true + no path exists online, the requested path will be created in the correct location online. The resulting + // response to the directory creation will then be returned. + // + // This function also ensures that each path in the requested path actually matches the requested element to ensure that the OneDrive API response + // is not falsely matching a 'case insensitive' match to the actual request which is a POSIX compliance issue. + JSONValue queryOneDriveForSpecificPathAndCreateIfMissing(string thisNewPathToSearch, bool createPathIfMissing) { + + // function variables + JSONValue getPathDetailsAPIResponse; + string currentPathTree; + Item parentDetails; + JSONValue topLevelChildren; + string nextLink; + bool directoryFoundOnline = false; + bool posixIssue = false; + + // Create a new API Instance for this thread and initialise it + OneDriveApi queryOneDriveForSpecificPath; + queryOneDriveForSpecificPath = new OneDriveApi(appConfig); + queryOneDriveForSpecificPath.initialise(); + + foreach (thisFolderName; pathSplitter(thisNewPathToSearch)) { + log.vdebug("Testing for the existance online of this folder path: ", thisFolderName); + directoryFoundOnline = false; + + // If this is '.' this is the account root + if (thisFolderName == ".") { + currentPathTree = thisFolderName; + } else { + currentPathTree = currentPathTree ~ "/" ~ thisFolderName; + } + + log.vdebug("Attempting to query OneDrive for this path: ", currentPathTree); + + // What query do we use? 
+ if (thisFolderName == ".") { + // Query the root, set the right details + try { + getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree); + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + directoryFoundOnline = true; + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(queryOneDriveForSpecificPath); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query path on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + queryOneDriveForSpecificPathAndCreateIfMissing(thisNewPathToSearch, createPathIfMissing); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } else { + // Ensure we have a valid driveId to search here + if (parentDetails.driveId.empty) { + parentDetails.driveId = appConfig.defaultDriveId; + } + + // If the prior JSON 'getPathDetailsAPIResponse' is on this account driveId .. then continue to use getPathDetails + if (parentDetails.driveId == appConfig.defaultDriveId) { + + try { + // Query OneDrive API for this path + getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree); + // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API + performPosixTest(thisFolderName, getPathDetailsAPIResponse["name"].str); + // No POSIX issue with requested path element + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + directoryFoundOnline = true; + + // Is this JSON a remote object + if (isItemRemote(getPathDetailsAPIResponse)) { + // Remote Directory .. 
need a DB Tie Item + log.vdebug("Creating a DB TIE for this Shared Folder"); + // New DB Tie Item to bind the 'remote' path to our parent path + Item tieDBItem; + // Set the name + tieDBItem.name = parentDetails.name; + // Set the correct item type + tieDBItem.type = ItemType.dir; + // Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie + tieDBItem.driveId = parentDetails.remoteDriveId; + tieDBItem.id = parentDetails.remoteId; + // Set the correct mtime + tieDBItem.mtime = parentDetails.mtime; + // Add tie DB record to the local database + log.vdebug("Adding tie DB record to database: ", tieDBItem); + itemDB.upsert(tieDBItem); + // Update parentDetails to use the DB Tie record + parentDetails = tieDBItem; + } + + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + directoryFoundOnline = false; + } else { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(queryOneDriveForSpecificPath); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + log.log(errorArray[0], " when attempting to query path on OneDrive - retrying applicable request in 30 seconds"); + log.vdebug(thisFunctionName, " previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + log.vdebug("Retrying Function: ", thisFunctionName); + queryOneDriveForSpecificPathAndCreateIfMissing(thisNewPathToSearch, createPathIfMissing); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } + } else { + // parentDetails.driveId is not the account drive id - thus will be a remote shared item + log.vdebug("This parent directory is a remote object this next path will be on a remote drive"); + // For this parentDetails.driveId, parentDetails.id object, query the OneDrive API for it's children + for (;;) { + // Query this remote object for its children + topLevelChildren = queryOneDriveForSpecificPath.listChildren(parentDetails.driveId, 
parentDetails.id, nextLink); + // Process each child + foreach (child; topLevelChildren["value"].array) { + // Is this child a folder? + if (isItemFolder(child)) { + // Is this the child folder we are looking for, and is a POSIX match? + if (child["name"].str == thisFolderName) { + // EXACT MATCH including case sensitivity: Flag that we found the folder online + directoryFoundOnline = true; + // Use these details for the next entry path + getPathDetailsAPIResponse = child; + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + // No need to continue searching + break; + } else { + string childAsLower = toLower(child["name"].str); + string thisFolderNameAsLower = toLower(thisFolderName); + if (childAsLower == thisFolderNameAsLower) { + // This is a POSIX 'case in-sensitive match' ..... + // Local item name has a 'case-insensitive match' to an existing item on OneDrive + posixIssue = true; + throw new posixException(thisFolderName, child["name"].str); + } + } + } + } + + if (directoryFoundOnline) { + // We found the folder, no need to continue searching nextLink data + break; + } + + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. + if ("@odata.nextLink" in topLevelChildren) { + // Update nextLink to next changeSet bundle + log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); + nextLink = topLevelChildren["@odata.nextLink"].str; + } else break; + } + } + } + + // If we did not find the folder, we need to create this folder + if (!directoryFoundOnline) { + // Folder not found online + // Set any response to be an invalid JSON item + getPathDetailsAPIResponse = null; + // Was there a POSIX issue? 
+ if (!posixIssue) { + // No POSIX issue + if (createPathIfMissing) { + // Create this path as it is missing on OneDrive online and there is no POSIX issue with a 'case-insensitive match' + log.vdebug("FOLDER NOT FOUND ONLINE AND WE ARE REQUESTED TO CREATE IT"); + log.vdebug("Create folder on this drive: ", parentDetails.driveId); + log.vdebug("Create folder as a child on this object: ", parentDetails.id); + log.vdebug("Create this folder name: ", thisFolderName); + + JSONValue newDriveItem = [ + "name": JSONValue(thisFolderName), + "folder": parseJSON("{}") + ]; + + JSONValue createByIdAPIResponse; + // Submit the creation request + // Fix for https://github.com/skilion/onedrive/issues/356 + if (!dryRun) { + try { + // Attempt to create a new folder on the configured parent driveId & parent id + createByIdAPIResponse = queryOneDriveForSpecificPath.createById(parentDetails.driveId, parentDetails.id, newDriveItem); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(createByIdAPIResponse); + // Set getPathDetailsAPIResponse to createByIdAPIResponse + getPathDetailsAPIResponse = createByIdAPIResponse; + } catch (OneDriveException e) { + // 409 - API Race Condition + if (e.httpStatusCode == 409) { + // When we attempted to create it, OneDrive responded that it now already exists + log.vlog("OneDrive reported that ", thisFolderName, " already exists .. 
OneDrive API race condition"); + } else { + // some other error from OneDrive was returned - display what it is + log.error("OneDrive generated an error when creating this path: ", thisFolderName); + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } + } + } else { + // Simulate a successful 'directory create' & save it to the dryRun database copy + // The simulated response has to pass 'makeItem' as part of saveItem + auto fakeResponse = createFakeResponse(thisNewPathToSearch); + // Save item to the database + saveItem(fakeResponse); + } + } + } + } + } - // Reset retry-after value to zero as we have used this value now and it may be changed in the future to a different value - onedrive.resetRetryAfterValue(); + // Shutdown API instance + queryOneDriveForSpecificPath.shutdown(); + // Free object and memory + object.destroy(queryOneDriveForSpecificPath); + + // Output our search results + log.vdebug("queryOneDriveForSpecificPathAndCreateIfMissing.getPathDetailsAPIResponse = ", getPathDetailsAPIResponse); + return getPathDetailsAPIResponse; } - // Generage a /delta compatible response when using National Azure AD deployments that do not support /delta queries - // see: https://docs.microsoft.com/en-us/graph/deployments#supported-features - JSONValue generateDeltaResponse(const(char)[] driveId, const(char)[] idToQuery) - { - // JSON value which will be responded with - JSONValue deltaResponse; - // initial data - JSONValue rootData; - JSONValue driveData; - JSONValue topLevelChildren; - JSONValue[] childrenData; - string nextLink; + // Delete an item by it's path + // This function is only used in --monitor mode and --remove-directory directive + void deleteByPath(string path) { + + // function variables + Item dbItem; + + // Need to check all driveid's we know about, not just the defaultDriveId + bool itemInDB = false; + foreach (searchDriveId; driveIDsArray) { + if (itemDB.selectByPath(path, searchDriveId, dbItem)) { + // item was found in the DB + 
itemInDB = true; + break; + } + } + + // Was the item found in the database? + if (!itemInDB) { + // path to delete is not in the local database .. + // was this a --remove-directory attempt? + if (!appConfig.getValueBool("monitor")) { + // --remove-directory deletion attempt + log.error("The item to delete is not in the local database - unable to delete online"); + return; + } else { + // normal use .. --monitor being used + throw new SyncException("The item to delete is not in the local database"); + } + } + + // This needs to be enforced as we have to know the parent id of the object being deleted + if (dbItem.parentId == null) { + // the item is a remote folder, need to do the operation on the parent + enforce(itemDB.selectByPathWithoutRemote(path, appConfig.defaultDriveId, dbItem)); + } - // Get drive details for the provided driveId try { - driveData = onedrive.getPathDetailsById(driveId, idToQuery); + if (noRemoteDelete) { + // do not process remote delete + log.vlog("Skipping remote delete as --upload-only & --no-remote-delete configured"); + } else { + uploadDeletedItem(dbItem, path); + } } catch (OneDriveException e) { - log.vdebug("driveData = onedrive.getPathDetailsById(driveId, idToQuery) generated a OneDriveException"); - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- if (e.httpStatusCode == 429) { - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request"); - handleOneDriveThrottleRequest(); + if (e.httpStatusCode == 404) { + log.log(e.msg); + } else { + // display what the error is + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } + } + } + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_move + // This function is only called in monitor mode when an move event is coming from + // inotify and we try to move the item. + void uploadMoveItem(string oldPath, string newPath) { + // Log that we are doing a move + log.log("Moving ", oldPath, " to ", newPath); + // Is this move unwanted? + bool unwanted = false; + // Item variables + Item oldItem, newItem, parentItem; + + // This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly + // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252 + if (!unwanted) { + if(!isValid(newPath)) { + // Path is not valid according to https://dlang.org/phobos/std_encoding.html + log.logAndNotify("Skipping item - invalid character encoding sequence: ", newPath); + unwanted = true; + } + } + + // Check this path against the Client Side Filtering Rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + if (!unwanted) { + unwanted = checkPathAgainstClientSideFiltering(newPath); + } + + // Check against Microsoft OneDrive restriction and limitations about Windows naming files + if (!unwanted) { + unwanted = checkPathAgainstMicrosoftNamingRestrictions(newPath); + } + + // 'newPath' has passed client side filtering validation + if (!unwanted) { + + if (!itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) { + // The old path|item is not 
synced with the database, upload as a new file + log.log("Moved local item was not in-sync with local databse - uploading as new item"); + uploadNewFile(newPath); + return; + } + + if (oldItem.parentId == null) { + // the item is a remote folder, need to do the operation on the parent + enforce(itemDB.selectByPathWithoutRemote(oldPath, appConfig.defaultDriveId, oldItem)); + } + + if (itemDB.selectByPath(newPath, appConfig.defaultDriveId, newItem)) { + // the destination has been overwritten + log.log("Moved local item overwrote an existing item - deleting old online item"); + uploadDeletedItem(newItem, newPath); + } + + if (!itemDB.selectByPath(dirName(newPath), appConfig.defaultDriveId, parentItem)) { + // the parent item is not in the database + throw new SyncException("Can't move an item to an unsynced directory"); + } + + if (oldItem.driveId != parentItem.driveId) { + // items cannot be moved between drives + uploadDeletedItem(oldItem, oldPath); + + // what sort of move is this? + if (isFile(newPath)) { + // newPath is a file + uploadNewFile(newPath); + } else { + // newPath is a directory + scanLocalFilesystemPathForNewData(newPath); } - if (e.httpStatusCode == 504) { - log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request"); - Thread.sleep(dur!"seconds"(30)); + } else { + if (!exists(newPath)) { + // is this --monitor use? 
+ if (appConfig.getValueBool("monitor")) { + log.vlog("uploadMoveItem target has disappeared: ", newPath); + return; + } + } + + // Configure the modification JSON item + SysTime mtime; + if (appConfig.getValueBool("monitor")) { + // Use the newPath modified timestamp + mtime = timeLastModified(newPath).toUTC(); + } else { + // Use the current system time + mtime = Clock.currTime().toUTC(); + } + + JSONValue data = [ + "name": JSONValue(baseName(newPath)), + "parentReference": JSONValue([ + "id": parentItem.id + ]), + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": mtime.toISOExtString() + ]) + ]; + + // Perform the move operation on OneDrive + JSONValue response; + + // Create a new API Instance for this thread and initialise it + OneDriveApi movePathOnlineApiInstance; + movePathOnlineApiInstance = new OneDriveApi(appConfig); + movePathOnlineApiInstance.initialise(); + + try { + response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, oldItem.eTag); + } catch (OneDriveException e) { + if (e.httpStatusCode == 412) { + // OneDrive threw a 412 error, most likely: ETag does not match current item's value + // Retry without eTag + log.vdebug("File Move Failed - OneDrive eTag / cTag match issue"); + log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error"); + string nullTag = null; + // move the file but without the eTag + response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, nullTag); + } + } + // Shutdown API instance + movePathOnlineApiInstance.shutdown(); + // Free object and memory + object.destroy(movePathOnlineApiInstance); + + // save the move response from OneDrive in the database + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(response); + } + } else { + // Moved item is unwanted + log.log("Item has been moved to a location that is excluded from sync operations. 
Removing item from OneDrive"); + uploadDeletedItem(oldItem, oldPath); + } + } + + // Perform integrity validation of the file that was uploaded + bool performUploadIntegrityValidationChecks(JSONValue uploadResponse, string localFilePath, ulong localFileSize) { + + bool integrityValid = false; + + if (!disableUploadValidation) { + // Integrity validation has not been disabled (this is the default so we are always integrity checking our uploads) + if (uploadResponse.type() == JSONType.object) { + // Provided JSON is a valid JSON + ulong uploadFileSize = uploadResponse["size"].integer; + string uploadFileHash = uploadResponse["file"]["hashes"]["quickXorHash"].str; + string localFileHash = computeQuickXorHash(localFilePath); + + if ((localFileSize == uploadFileSize) && (localFileHash == uploadFileHash)) { + // Uploaded file integrity intact + log.vdebug("Uploaded local file matches reported online size and hash values"); + integrityValid = true; + } else { + // Upload integrity failure .. what failed? + // There are 2 scenarios where this happens: + // 1. Failed Transfer + // 2. Upload file is going to a SharePoint Site, where Microsoft enriches the file with additional metadata with no way to disable + log.logAndNotify("WARNING: Uploaded file integrity failure for: ", localFilePath); + + // What integrity failed - size? + if (localFileSize != uploadFileSize) { + log.vlog("WARNING: Uploaded file integrity failure - Size Mismatch"); + } + // What integrity failed - hash? + if (localFileHash != uploadFileHash) { + log.vlog("WARNING: Uploaded file integrity failure - Hash Mismatch"); + } + + // What account type is this? 
+ if (appConfig.accountType != "personal") { + // Not a personal account, thus the integrity failure is most likely due to SharePoint + log.vlog("CAUTION: Microsoft SharePoint enhances files after you upload them, which means this file may now have technical differences from your local copy, resulting in an integrity issue."); + log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); + } + // How can this be disabled? + log.log("To disable the integrity checking of uploaded files use --disable-upload-validation"); } - // Retry original request by calling function again to avoid replicating any further error handling - driveData = onedrive.getPathDetailsById(driveId, idToQuery); } else { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + log.log("Upload file validation unable to be performed: input JSON was invalid"); + log.log("WARNING: Skipping upload integrity check for: ", localFilePath); } + } else { + // We are bypassing integrity checks due to --disable-upload-validation + log.vdebug("Upload file validation disabled due to --disable-upload-validation"); + log.vlog("WARNING: Skipping upload integrity check for: ", localFilePath); + } + + // Is the file integrity online valid? + return integrityValid; + } + + // Query Office 365 SharePoint Shared Library site name to obtain it's Drive ID + void querySiteCollectionForDriveID(string sharepointLibraryNameToQuery) { + // Steps to get the ID: + // 1. Query https://graph.microsoft.com/v1.0/sites?search= with the name entered + // 2. Evaluate the response. A valid response will contain the description and the id. If the response comes back with nothing, the site name cannot be found or no access + // 3. If valid, use the returned ID and query the site drives + // https://graph.microsoft.com/v1.0/sites//drives + // 4. 
Display Shared Library Name & Drive ID + + string site_id; + string drive_id; + bool found = false; + JSONValue siteQuery; + string nextLink; + string[] siteSearchResults; + + // Create a new API Instance for this thread and initialise it + OneDriveApi querySharePointLibraryNameApiInstance; + querySharePointLibraryNameApiInstance = new OneDriveApi(appConfig); + querySharePointLibraryNameApiInstance.initialise(); + + // The account type must not be a personal account type + if (appConfig.accountType == "personal") { + log.error("ERROR: A OneDrive Personal Account cannot be used with --get-sharepoint-drive-id. Please re-authenticate your client using a OneDrive Business Account."); + return; } - if (!isItemRoot(driveData)) { - // Get root details for the provided driveId + // What query are we performing? + writeln(); + log.log("Office 365 Library Name Query: ", sharepointLibraryNameToQuery); + + for (;;) { try { - rootData = onedrive.getDriveIdRoot(driveId); + siteQuery = querySharePointLibraryNameApiInstance.o365SiteSearch(nextLink); } catch (OneDriveException e) { - log.vdebug("rootData = onedrive.getDriveIdRoot(driveId) generated a OneDriveException"); + log.error("ERROR: Query of OneDrive for Office 365 Library Name failed"); + // Forbidden - most likely authentication scope needs to be updated + if (e.httpStatusCode == 403) { + log.error("ERROR: Authentication scope needs to be updated. 
Use --reauth and re-authenticate client."); + return; + } + // Requested resource cannot be found + if (e.httpStatusCode == 404) { + string siteSearchUrl; + if (nextLink.empty) { + siteSearchUrl = querySharePointLibraryNameApiInstance.getSiteSearchUrl(); + } else { + siteSearchUrl = nextLink; + } + // log the error + log.error("ERROR: Your OneDrive Account and Authentication Scope cannot access this OneDrive API: ", siteSearchUrl); + log.error("ERROR: To resolve, please discuss this issue with whomever supports your OneDrive and SharePoint environment."); + return; + } + // HTTP request returned status code 429 (Too Many Requests) + if (e.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(querySharePointLibraryNameApiInstance); + log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); + } // HTTP request returned status code 504 (Gateway Timeout) or 429 retry if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- if (e.httpStatusCode == 429) { - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request"); - handleOneDriveThrottleRequest(); - } + // re-try the specific changes queries if (e.httpStatusCode == 504) { - log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request"); + log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query Sharepoint Sites - retrying applicable request"); + log.vdebug("siteQuery = onedrive.o365SiteSearch(nextLink) previously threw an error - retrying"); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); Thread.sleep(dur!"seconds"(30)); } - // Retry original request by calling function again to avoid replicating any further error handling - rootData = onedrive.getDriveIdRoot(driveId); - - } else { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); - } - } - // Add driveData JSON data to array - log.vlog("Adding OneDrive root details for processing"); - childrenData ~= rootData; - } - - // Add driveData JSON data to array - log.vlog("Adding OneDrive folder details for processing"); - childrenData ~= driveData; - - for (;;) { - // query top level children - try { - topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink); - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - 
log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("nextLink: ", nextLink); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); - } - - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // re-try original request - retried for 429 and 504 + try { + log.vdebug("Retrying Query: siteQuery = onedrive.o365SiteSearch(nextLink)"); + siteQuery = querySharePointLibraryNameApiInstance.o365SiteSearch(nextLink); + log.vdebug("Query 'siteQuery = onedrive.o365SiteSearch(nextLink)' performed successfully on re-try"); + } catch (OneDriveException e) { + // display what the error is + log.vdebug("Query Error: siteQuery = onedrive.o365SiteSearch(nextLink) on re-try after delay"); + // error was not a 504 this time + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + return; + } + } else { + // display what the error is + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + return; + } + } + + // is siteQuery a valid JSON object & contain data we can use? 
+ if ((siteQuery.type() == JSONType.object) && ("value" in siteQuery)) { + // valid JSON object + log.vdebug("O365 Query Response: ", siteQuery); + + foreach (searchResult; siteQuery["value"].array) { + // Need an 'exclusive' match here with sharepointLibraryNameToQuery as entered + log.vdebug("Found O365 Site: ", searchResult); + + // 'displayName' and 'id' have to be present in the search result record in order to query the site + if (("displayName" in searchResult) && ("id" in searchResult)) { + if (sharepointLibraryNameToQuery == searchResult["displayName"].str){ + // 'displayName' matches search request + site_id = searchResult["id"].str; + JSONValue siteDriveQuery; + + try { + siteDriveQuery = querySharePointLibraryNameApiInstance.o365SiteDrives(site_id); + } catch (OneDriveException e) { + log.error("ERROR: Query of OneDrive for Office Site ID failed"); + // display what the error is + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + return; + } + + // is siteDriveQuery a valid JSON object & contain data we can use? 
+ if ((siteDriveQuery.type() == JSONType.object) && ("value" in siteDriveQuery)) { + // valid JSON object + foreach (driveResult; siteDriveQuery["value"].array) { + // Display results + writeln("-----------------------------------------------"); + log.vdebug("Site Details: ", driveResult); + found = true; + writeln("Site Name: ", searchResult["displayName"].str); + writeln("Library Name: ", driveResult["name"].str); + writeln("drive_id: ", driveResult["id"].str); + writeln("Library URL: ", driveResult["webUrl"].str); + } + // closeout + writeln("-----------------------------------------------"); + } else { + // not a valid JSON object + log.error("ERROR: There was an error performing this operation on OneDrive"); + log.error("ERROR: Increase logging verbosity to assist determining why."); + return; + } + } + } else { + // 'displayName', 'id' or ''webUrl' not present in JSON results for a specific site + string siteNameAvailable = "Site 'name' was restricted by OneDrive API permissions"; + bool displayNameAvailable = false; + bool idAvailable = false; + if ("name" in searchResult) siteNameAvailable = searchResult["name"].str; + if ("displayName" in searchResult) displayNameAvailable = true; + if ("id" in searchResult) idAvailable = true; + + // Display error details for this site data + writeln(); + log.error("ERROR: SharePoint Site details not provided for: ", siteNameAvailable); + log.error("ERROR: The SharePoint Site results returned from OneDrive API do not contain the required items to match. 
Please check your permissions with your site administrator."); + log.error("ERROR: Your site security settings is preventing the following details from being accessed: 'displayName' or 'id'"); + log.vlog(" - Is 'displayName' available = ", displayNameAvailable); + log.vlog(" - Is 'id' available = ", idAvailable); + log.error("ERROR: To debug this further, please increase verbosity (--verbose or --verbose --verbose) to provide further insight as to what details are actually being returned."); + } } - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive children - retrying applicable request"); - log.vdebug("topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink); - log.vdebug("Query 'topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } - - // process top level children - log.vlog("Adding ", count(topLevelChildren["value"].array), " OneDrive items for processing from OneDrive folder"); - foreach (child; topLevelChildren["value"].array) { - // add this child to the array of objects - childrenData ~= child; - // is this child a folder? 
- if (isItemFolder(child)){ - // We have to query this folders children if childCount > 0 - if (child["folder"]["childCount"].integer > 0){ - // This child folder has children - string childIdToQuery = child["id"].str; - string childDriveToQuery = child["parentReference"]["driveId"].str; - auto childParentPath = child["parentReference"]["path"].str.split(":"); - string folderPathToScan = childParentPath[1] ~ "/" ~ child["name"].str; - string pathForLogging = "/" ~ driveData["name"].str ~ "/" ~ child["name"].str; - JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, pathForLogging); - foreach (grandChild; grandChildrenData.array) { - // add the grandchild to the array - childrenData ~= grandChild; + if(!found) { + // The SharePoint site we are searching for was not found in this bundle set + // Add to siteSearchResults so we can display what we did find + string siteSearchResultsEntry; + foreach (searchResult; siteQuery["value"].array) { + // We can only add the displayName if it is available + if ("displayName" in searchResult) { + // Use the displayName + siteSearchResultsEntry = " * " ~ searchResult["displayName"].str; + siteSearchResults ~= siteSearchResultsEntry; + } else { + // Add, but indicate displayName unavailable, use id + if ("id" in searchResult) { + siteSearchResultsEntry = " * " ~ "Unknown displayName (Data not provided by API), Site ID: " ~ searchResult["id"].str; + siteSearchResults ~= siteSearchResultsEntry; + } else { + // displayName and id unavailable, display in debug log the entry + log.vdebug("Bad SharePoint Data for site: ", searchResult); + } } } } + } else { + // not a valid JSON object + log.error("ERROR: There was an error performing this operation on OneDrive"); + log.error("ERROR: Increase logging verbosity to assist determining why."); + return; } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response // to indicate 
more items are available and provide the request URL for the next page of items. - if ("@odata.nextLink" in topLevelChildren) { - // Update nextLink to next changeSet bundle + if ("@odata.nextLink" in siteQuery) { + // Update nextLink to next set of SharePoint library names + nextLink = siteQuery["@odata.nextLink"].str; log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); - nextLink = topLevelChildren["@odata.nextLink"].str; } else break; } - // craft response from all returned elements - deltaResponse = [ - "@odata.context": JSONValue("https://graph.microsoft.com/v1.0/$metadata#Collection(driveItem)"), - "value": JSONValue(childrenData.array) - ]; + // Was the intended target found? + if(!found) { + + // Was the search a wildcard? + if (sharepointLibraryNameToQuery != "*") { + // Only print this out if the search was not a wildcard + writeln(); + log.error("ERROR: The requested SharePoint site could not be found. Please check it's name and your permissions to access the site."); + } + // List all sites returned to assist user + writeln(); + log.log("The following SharePoint site names were returned:"); + foreach (searchResultEntry; siteSearchResults) { + // list the display name that we use to match against the user query + log.log(searchResultEntry); + } + } - // return the generated JSON response - return deltaResponse; + // Shutdown API instance + querySharePointLibraryNameApiInstance.shutdown(); + // Free object and memory + object.destroy(querySharePointLibraryNameApiInstance); } - // query child for children - JSONValue[] queryForChildren(const(char)[] driveId, const(char)[] idToQuery, const(char)[] childParentPath, string pathForLogging) - { - // function variables - JSONValue thisLevelChildren; - JSONValue[] thisLevelChildrenData; - string nextLink; - + // Query the sync status of the client and the local system + void queryOneDriveForSyncStatus(string pathToQueryStatusOn) { + + // Query the account driveId and rootId to get the /delta JSON 
information + // Process that JSON data for relevancy + + // Function variables + ulong downloadSize = 0; + string deltaLink = null; + string driveIdToQuery = appConfig.defaultDriveId; + string itemIdToQuery = appConfig.defaultRootId; + JSONValue deltaChanges; + + // Array of JSON items + JSONValue[] jsonItemsArray; + + // Query Database for a potential deltaLink starting point + deltaLink = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); + + write("Querying the change status of Drive ID: ", driveIdToQuery, " ."); + // Query the OenDrive API using the applicable details, following nextLink if applicable + + // Create a new API Instance for querying /delta and initialise it + OneDriveApi getDeltaQueryOneDriveApiInstance; + getDeltaQueryOneDriveApiInstance = new OneDriveApi(appConfig); + getDeltaQueryOneDriveApiInstance.initialise(); + for (;;) { - // query children - thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink); - - // process this level children - if (!childParentPath.empty) { - // We dont use childParentPath to log, as this poses an information leak risk. - // The full parent path of the child, as per the JSON might be: - // /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder - // But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged. - // Plus, the application output now shows accuratly what is being shared - so that is a good thing. - log.vlog("Adding ", count(thisLevelChildren["value"].array), " OneDrive items for processing from ", pathForLogging); - } - foreach (child; thisLevelChildren["value"].array) { - // add this child to the array of objects - thisLevelChildrenData ~= child; - // is this child a folder? 
- if (isItemFolder(child)){ - // We have to query this folders children if childCount > 0 - if (child["folder"]["childCount"].integer > 0){ - // This child folder has children - string childIdToQuery = child["id"].str; - string childDriveToQuery = child["parentReference"]["driveId"].str; - auto grandchildParentPath = child["parentReference"]["path"].str.split(":"); - string folderPathToScan = grandchildParentPath[1] ~ "/" ~ child["name"].str; - string newLoggingPath = pathForLogging ~ "/" ~ child["name"].str; - JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, newLoggingPath); - foreach (grandChild; grandChildrenData.array) { - // add the grandchild to the array - thisLevelChildrenData ~= grandChild; + // Add a processing '.' + write("."); + + // Get the /delta changes via the OneDrive API + // getDeltaChangesByItemId has the re-try logic for transient errors + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, deltaLink, getDeltaQueryOneDriveApiInstance); + + // If the initial deltaChanges response is an invalid JSON object, keep trying .. + if (deltaChanges.type() != JSONType.object) { + while (deltaChanges.type() != JSONType.object) { + // Handle the invalid JSON response adn retry + log.vdebug("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response"); + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, deltaLink, getDeltaQueryOneDriveApiInstance); + } + } + + // We have a valid deltaChanges JSON array. This means we have at least 200+ JSON items to process. 
+ // The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed + foreach (onedriveJSONItem; deltaChanges["value"].array) { + // is the JSON a root object - we dont want to count this + if (!isItemRoot(onedriveJSONItem)) { + // Files are the only item that we want to calculate + if (isItemFile(onedriveJSONItem)) { + // JSON item is a file + // Is the item filtered out due to client side filtering rules? + if (!checkJSONAgainstClientSideFiltering(onedriveJSONItem)) { + // Is the path of this JSON item 'in-scope' or 'out-of-scope' ? + if (pathToQueryStatusOn != "/") { + // We need to check the path of this item against pathToQueryStatusOn + string thisItemPath = ""; + if (("path" in onedriveJSONItem["parentReference"]) != null) { + // If there is a parent reference path, try and use it + string selfBuiltPath = onedriveJSONItem["parentReference"]["path"].str ~ "/" ~ onedriveJSONItem["name"].str; + auto splitPath = selfBuiltPath.split("root:"); + thisItemPath = splitPath[1]; + } else { + // no parent reference path available + thisItemPath = onedriveJSONItem["name"].str; + } + // can we find 'pathToQueryStatusOn' in 'thisItemPath' ? + if (canFind(thisItemPath, pathToQueryStatusOn)) { + // Add this to the array for processing + jsonItemsArray ~= onedriveJSONItem; + } + } else { + // We are not doing a --single-directory check + // Add this to the array for processing + jsonItemsArray ~= onedriveJSONItem; + } } } } } - // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response - // to indicate more items are available and provide the request URL for the next page of items. 
- if ("@odata.nextLink" in thisLevelChildren) { - // Update nextLink to next changeSet bundle - nextLink = thisLevelChildren["@odata.nextLink"].str; - log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); - } else break; + + // The response may contain either @odata.deltaLink or @odata.nextLink + if ("@odata.deltaLink" in deltaChanges) { + deltaLink = deltaChanges["@odata.deltaLink"].str; + log.vdebug("Setting next deltaLink to (@odata.deltaLink): ", deltaLink); + } + + // Update deltaLink to next changeSet bundle + if ("@odata.nextLink" in deltaChanges) { + deltaLink = deltaChanges["@odata.nextLink"].str; + log.vdebug("Setting next deltaLink to (@odata.nextLink): ", deltaLink); + } + else break; } + // Needed after printing out '....' when fetching changes from OneDrive API + writeln(); - // return response - return thisLevelChildrenData; - } - - // Query from OneDrive the child objects for this element - JSONValue queryThisLevelChildren(const(char)[] driveId, const(char)[] idToQuery, string nextLink) - { - JSONValue thisLevelChildren; - - // query children - try { - // attempt API call - log.vdebug("Attempting Query: thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink); - log.vdebug("Query 'thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)' performed successfully"); - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("nextLink: ", nextLink); + // Are there any JSON items to process? 
+ if (count(jsonItemsArray) != 0) { + // There are items to process + foreach (onedriveJSONItem; jsonItemsArray.array) { - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); - } - - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - // transient error - try again in 30 seconds - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive children - retrying applicable request"); - log.vdebug("thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); + // variables we need + string thisItemParentDriveId; + string thisItemId; + string thisItemHash; + bool existingDBEntry = false; + + // Is this file a remote item (on a shared folder) ? + if (isItemRemote(onedriveJSONItem)) { + // remote drive item + thisItemParentDriveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str; + thisItemId = onedriveJSONItem["id"].str; + } else { + // standard drive item + thisItemParentDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + thisItemId = onedriveJSONItem["id"].str; } - // re-try original request - retried for 429 and 504 - but loop back calling this function - log.vdebug("Retrying Query: thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink)"); - thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink); + + // Get the file hash + thisItemHash = onedriveJSONItem["file"]["hashes"]["quickXorHash"].str; + + // Check if the item has been seen before + Item existingDatabaseItem; + existingDBEntry = itemDB.selectById(thisItemParentDriveId, thisItemId, existingDatabaseItem); + + if (existingDBEntry) { + // item exists in database .. do the database details match the JSON record? + if (existingDatabaseItem.quickXorHash != thisItemHash) { + // file hash is different, will trigger a download event + downloadSize = downloadSize + onedriveJSONItem["size"].integer; + } + } else { + // item does not exist in the database + // this item has already passed client side filtering rules (skip_dir, skip_file, sync_list) + downloadSize = downloadSize + onedriveJSONItem["size"].integer; + } + } + } + + // Was anything detected that would constitute a download? 
+ if (downloadSize > 0) { + // we have something to download + if (pathToQueryStatusOn != "/") { + writeln("The selected local directory via --single-directory is out of sync with Microsoft OneDrive"); } else { - // Default operation if not 404, 429 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + writeln("The configured local 'sync_dir' directory is out of sync with Microsoft OneDrive"); } + writeln("Approximate data to download from Microsoft OneDrive: ", (downloadSize/1024), " KB"); + } else { + // No changes were returned + writeln("There are no pending changes from Microsoft OneDrive; your local directory matches the data online."); } - // return response - return thisLevelChildren; } - // OneDrive Business Shared Folder support - void listOneDriveBusinessSharedFolders() - { - // List OneDrive Business Shared Folders - log.log("\nListing available OneDrive Business Shared Folders:"); - // Query the GET /me/drive/sharedWithMe API - JSONValue graphQuery; - try { - graphQuery = onedrive.getSharedWithMe(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();"); - graphQuery = onedrive.getSharedWithMe(); - } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); - } - } + // Query OneDrive for file details of a given path, returning either the 'webURL' or 'lastModifiedBy' JSON facet + void queryOneDriveForFileDetails(string inputFilePath, string runtimePath, string outputType) { + + // Calculate the full local file path + string fullLocalFilePath = buildNormalizedPath(buildPath(runtimePath, inputFilePath)); - if (graphQuery.type() == JSONType.object) { - if (count(graphQuery["value"].array) == 0) { - // no shared folders returned - write("\nNo OneDrive Business Shared Folders were returned\n"); - } else { - // shared folders were returned - log.vdebug("onedrive.getSharedWithMe API Response: ", graphQuery); - foreach (searchResult; graphQuery["value"].array) { - // loop variables - string sharedFolderName; - string sharedByName; - string sharedByEmail; - // is the shared item with us a 'folder' ? 
- // we only handle folders, not files or other items - if (isItemFolder(searchResult)) { - // Debug response output - log.vdebug("shared folder entry: ", searchResult); - sharedFolderName = searchResult["name"].str; + // Query if file is valid locally + if (exists(fullLocalFilePath)) { + // search drive_id list + string[] distinctDriveIds = itemDB.selectDistinctDriveIds(); + bool pathInDB = false; + Item dbItem; + + foreach (searchDriveId; distinctDriveIds) { + // Does this path exist in the database, use the 'inputFilePath' + if (itemDB.selectByPath(inputFilePath, searchDriveId, dbItem)) { + // item is in the database + pathInDB = true; + JSONValue fileDetailsFromOneDrive; + + // Create a new API Instance for this thread and initialise it + OneDriveApi queryOneDriveForFileDetailsApiInstance; + queryOneDriveForFileDetailsApiInstance = new OneDriveApi(appConfig); + queryOneDriveForFileDetailsApiInstance.initialise(); + + try { + fileDetailsFromOneDrive = queryOneDriveForFileDetailsApiInstance.getPathDetailsById(dbItem.driveId, dbItem.id); + } catch (OneDriveException exception) { + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + return; + } + + // Is the API response a valid JSON file? 
+ if (fileDetailsFromOneDrive.type() == JSONType.object) { + + // debug output of response + log.vdebug("API Response: ", fileDetailsFromOneDrive); - // configure who this was shared by - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - // we have shared by details we can use - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; + // What sort of response to we generate + // --get-file-link response + if (outputType == "URL") { + if ((fileDetailsFromOneDrive.type() == JSONType.object) && ("webUrl" in fileDetailsFromOneDrive)) { + // Valid JSON object + writeln(); + writeln("WebURL: ", fileDetailsFromOneDrive["webUrl"].str); } } - // Output query result - log.log("---------------------------------------"); - log.log("Shared Folder: ", sharedFolderName); - if ((sharedByName != "") && (sharedByEmail != "")) { - log.log("Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.log("Shared By: ", sharedByName); + + // --modified-by response + if (outputType == "ModifiedBy") { + if ((fileDetailsFromOneDrive.type() == JSONType.object) && ("lastModifiedBy" in fileDetailsFromOneDrive)) { + // Valid JSON object + writeln(); + writeln("Last modified: ", fileDetailsFromOneDrive["lastModifiedDateTime"].str); + writeln("Last modified by: ", fileDetailsFromOneDrive["lastModifiedBy"]["user"]["displayName"].str); + // if 'email' provided, add this to the output + if ("email" in fileDetailsFromOneDrive["lastModifiedBy"]["user"]) { + writeln("Email Address: ", fileDetailsFromOneDrive["lastModifiedBy"]["user"]["email"].str); + } } } - log.vlog("Item Id: ", searchResult["remoteItem"]["id"].str); - log.vlog("Parent Drive Id: ", 
searchResult["remoteItem"]["parentReference"]["driveId"].str); - if ("id" in searchResult["remoteItem"]["parentReference"]) { - log.vlog("Parent Item Id: ", searchResult["remoteItem"]["parentReference"]["id"].str); + + // --create-share-link response + if (outputType == "ShareableLink") { + + JSONValue accessScope; + JSONValue createShareableLinkResponse; + string thisDriveId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; + string thisItemId = fileDetailsFromOneDrive["id"].str; + string fileShareLink; + bool writeablePermissions = appConfig.getValueBool("with_editing_perms"); + + // What sort of shareable link is required? + if (writeablePermissions) { + // configure the read-write access scope + accessScope = [ + "type": "edit", + "scope": "anonymous" + ]; + } else { + // configure the read-only access scope (default) + accessScope = [ + "type": "view", + "scope": "anonymous" + ]; + } + + // Try and create the shareable file link + try { + createShareableLinkResponse = queryOneDriveForFileDetailsApiInstance.createShareableLink(thisDriveId, thisItemId, accessScope); + } catch (OneDriveException exception) { + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + return; + } + + // Is the API response a valid JSON file? 
+ if ((createShareableLinkResponse.type() == JSONType.object) && ("link" in createShareableLinkResponse)) { + // Extract the file share link from the JSON response + fileShareLink = createShareableLinkResponse["link"]["webUrl"].str; + writeln("File Shareable Link: ", fileShareLink); + if (writeablePermissions) { + writeln("Shareable Link has read-write permissions - use and provide with caution"); + } + } } } + + // Shutdown the API access + queryOneDriveForFileDetailsApiInstance.shutdown(); + // Free object and memory + object.destroy(queryOneDriveForFileDetailsApiInstance); } } - write("\n"); - } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); - } - } - - // Query itemdb.computePath() and catch potential assert when DB consistency issue occurs - string computeItemPath(string thisDriveId, string thisItemId) - { - static import core.exception; - string calculatedPath; - log.vdebug("Attempting to calculate local filesystem path for ", thisDriveId, " and ", thisItemId); - try { - calculatedPath = itemdb.computePath(thisDriveId, thisItemId); - } catch (core.exception.AssertError) { - // broken tree in the database, we cant compute the path for this item id, exit - log.error("ERROR: A database consistency issue has been caught. A --resync is needed to rebuild the database."); - // Must exit here to preserve data - onedrive.shutdown(); - exit(-1); - } - - // return calculated path as string - return calculatedPath; - } - - void handleClientUnauthorised() - { - // common code for handling when a client is unauthorised - writeln(); - log.errorAndNotify("ERROR: Check your configuration as your refresh_token may be empty or invalid. 
You may need to issue a --reauth and re-authorise this client."); - writeln(); - // Must exit here - onedrive.shutdown(); - exit(-1); - } - - // Wrapper function for makeDatabaseItem so we can check if the item, if a file, has any hashes - private Item makeItem(JSONValue onedriveJSONItem) - { - Item newDatabaseItem = makeDatabaseItem(onedriveJSONItem); - - // Check for hashes in this DB item - if (newDatabaseItem.type == ItemType.file) { - // Does this file have a size greater than 0 - zero size files will potentially not have a hash - if (hasFileSize(onedriveJSONItem)) { - if (onedriveJSONItem["size"].integer > 0) { - // Does the item have any hashes? - if ((newDatabaseItem.quickXorHash.empty) && (newDatabaseItem.sha256Hash.empty)) { - // Odd .. no hash ...... - string apiMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: "; - // This is computationally expensive .. but we are only doing this if there are no hashses provided: - bool parentInDatabase = itemdb.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.parentId); - if (parentInDatabase) { - // Calculate this item path - string newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; - log.log(apiMessage, newItemPath); - } else { - // Use the item ID - log.log(apiMessage, newDatabaseItem.id); - } - } - } + + // was path found? 
+ if (!pathInDB) { + // File has not been synced with OneDrive + log.error("Selected path has not been synced with OneDrive: ", inputFilePath); } + } else { + // File does not exist locally + log.error("Selected path not found on local system: ", inputFilePath); } - return newDatabaseItem; } - -} +} \ No newline at end of file diff --git a/src/upload.d b/src/upload.d deleted file mode 100644 index 012598a05..000000000 --- a/src/upload.d +++ /dev/null @@ -1,302 +0,0 @@ -import std.algorithm, std.conv, std.datetime, std.file, std.json; -import std.stdio, core.thread, std.string; -import progress, onedrive, util; -static import log; - -private long fragmentSize = 10 * 2^^20; // 10 MiB - -struct UploadSession -{ - private OneDriveApi onedrive; - private bool verbose; - // https://dev.onedrive.com/resources/uploadSession.htm - private JSONValue session; - // path where to save the session - private string sessionFilePath; - - this(OneDriveApi onedrive, string sessionFilePath) - { - assert(onedrive); - this.onedrive = onedrive; - this.sessionFilePath = sessionFilePath; - this.verbose = verbose; - } - - JSONValue upload(string localPath, const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null) - { - // Fix https://github.com/abraunegg/onedrive/issues/2 - // More Details https://github.com/OneDrive/onedrive-api-docs/issues/778 - - SysTime localFileLastModifiedTime = timeLastModified(localPath).toUTC(); - localFileLastModifiedTime.fracSecs = Duration.zero; - - JSONValue fileSystemInfo = [ - "item": JSONValue([ - "@name.conflictBehavior": JSONValue("replace"), - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": localFileLastModifiedTime.toISOExtString() - ]) - ]) - ]; - - // Try to create the upload session for this file - session = onedrive.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo); - - if ("uploadUrl" in session){ - session["localPath"] = localPath; - save(); - return upload(); - } else 
{ - // there was an error - log.vlog("Create file upload session failed ... skipping file upload"); - // return upload() will return a JSONValue response, create an empty JSONValue response to return - JSONValue response; - return response; - } - } - - /* Restore the previous upload session. - * Returns true if the session is valid. Call upload() to resume it. - * Returns false if there is no session or the session is expired. */ - bool restore() - { - if (exists(sessionFilePath)) { - log.vlog("Trying to restore the upload session ..."); - // We cant use JSONType.object check, as this is currently a string - // We cant use a try & catch block, as it does not catch std.json.JSONException - auto sessionFileText = readText(sessionFilePath); - if(canFind(sessionFileText,"@odata.context")) { - session = readText(sessionFilePath).parseJSON(); - } else { - log.vlog("Upload session resume data is invalid"); - remove(sessionFilePath); - return false; - } - - // Check the session resume file for expirationDateTime - if ("expirationDateTime" in session){ - // expirationDateTime in the file - auto expiration = SysTime.fromISOExtString(session["expirationDateTime"].str); - if (expiration < Clock.currTime()) { - log.vlog("The upload session is expired"); - return false; - } - if (!exists(session["localPath"].str)) { - log.vlog("The file does not exist anymore"); - return false; - } - // Can we read the file - as a permissions issue or file corruption will cause a failure on resume - // https://github.com/abraunegg/onedrive/issues/113 - if (readLocalFile(session["localPath"].str)){ - // able to read the file - // request the session status - JSONValue response; - try { - response = onedrive.requestUploadStatus(session["uploadUrl"].str); - } catch (OneDriveException e) { - // handle any onedrive error response - if (e.httpStatusCode == 400) { - log.vlog("Upload session not found"); - return false; - } - } - - // do we have a valid response from OneDrive? 
- if (response.type() == JSONType.object){ - // JSON object - if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)){ - // has the elements we need - session["expirationDateTime"] = response["expirationDateTime"]; - session["nextExpectedRanges"] = response["nextExpectedRanges"]; - if (session["nextExpectedRanges"].array.length == 0) { - log.vlog("The upload session is completed"); - return false; - } - } else { - // bad data - log.vlog("Restore file upload session failed - invalid data response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } else { - // not a JSON object - log.vlog("Restore file upload session failed - invalid response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - return true; - } else { - // unable to read the local file - log.vlog("Restore file upload session failed - unable to read the local file"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } else { - // session file contains an error - cant resume - log.vlog("Restore file upload session failed - cleaning up session resume"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } - return false; - } - - JSONValue upload() - { - // Response for upload - JSONValue response; - - // session JSON needs to contain valid elements - long offset; - long fileSize; - - if ("nextExpectedRanges" in session){ - offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long; - } - - if ("localPath" in session){ - fileSize = getSize(session["localPath"].str); - } - - if ("uploadUrl" in session){ - // Upload file via session created - // Upload Progress Bar - size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1; - Progress p = new Progress(iteration); - p.title = "Uploading"; - long fragmentCount = 0; - long fragSize = 0; - - // Initialise the download bar at 0% - 
p.next(); - - while (true) { - fragmentCount++; - log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration); - p.next(); - log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " fileSize: ", fileSize ); - fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset; - log.vdebugNewLine("Using fragSize: ", fragSize); - - // fragSize must not be a negative value - if (fragSize < 0) { - // Session upload will fail - // not a JSON object - fragment upload failed - log.vlog("File upload session failed - invalid calculation of fragment size"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - // set response to null as error - response = null; - return response; - } - - // If the resume upload fails, we need to check for a return code here - try { - response = onedrive.uploadFragment( - session["uploadUrl"].str, - session["localPath"].str, - offset, - fragSize, - fileSize - ); - } catch (OneDriveException e) { - // if a 100 response is generated, continue - if (e.httpStatusCode == 100) { - continue; - } - // there was an error response from OneDrive when uploading the file fragment - // handle 'HTTP request returned status code 429 (Too Many Requests)' first - if (e.httpStatusCode == 429) { - auto retryAfterValue = onedrive.getRetryAfterValue(); - log.vdebug("Fragment upload failed - received throttle request response from OneDrive"); - log.vdebug("Using Retry-After Value = ", retryAfterValue); - // Sleep thread as per request - log.log("\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); - log.log("Sleeping for ", retryAfterValue, " seconds"); - Thread.sleep(dur!"seconds"(retryAfterValue)); - log.log("Retrying fragment upload"); - } else { - // insert a new line as well, so that the below error is inserted on the console in the right location - log.vlog("\nFragment upload failed - received an exception response from OneDrive"); - // display what the error is - 
displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // retry fragment upload in case error is transient - log.vlog("Retrying fragment upload"); - } - - try { - response = onedrive.uploadFragment( - session["uploadUrl"].str, - session["localPath"].str, - offset, - fragSize, - fileSize - ); - } catch (OneDriveException e) { - // OneDrive threw another error on retry - log.vlog("Retry to upload fragment failed"); - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // set response to null as the fragment upload was in error twice - response = null; - } - } - // was the fragment uploaded without issue? - if (response.type() == JSONType.object){ - offset += fragmentSize; - if (offset >= fileSize) break; - // update the session details - session["expirationDateTime"] = response["expirationDateTime"]; - session["nextExpectedRanges"] = response["nextExpectedRanges"]; - save(); - } else { - // not a JSON object - fragment upload failed - log.vlog("File upload session failed - invalid response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - // set response to null as error - response = null; - return response; - } - } - // upload complete - p.next(); - writeln(); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return response; - } else { - // session elements were not present - log.vlog("Session has no valid upload URL ... 
skipping this file upload"); - // return an empty JSON response - response = null; - return response; - } - } - - string getUploadSessionLocalFilePath() { - // return the session file path - string localPath = ""; - if ("localPath" in session){ - localPath = session["localPath"].str; - } - return localPath; - } - - // save session details to temp file - private void save() - { - std.file.write(sessionFilePath, session.toString()); - } -} diff --git a/src/util.d b/src/util.d index cbaa5b8ef..9bb45b1fc 100644 --- a/src/util.d +++ b/src/util.d @@ -1,6 +1,12 @@ +// What is this module called? +module util; + +// What does this module require to function? +import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; import std.base64; import std.conv; -import std.digest.crc, std.digest.sha; +import std.digest.crc; +import std.digest.sha; import std.net.curl; import std.datetime; import std.file; @@ -13,22 +19,24 @@ import std.algorithm; import std.uri; import std.json; import std.traits; -import qxor; import core.stdc.stdlib; +import core.thread; +// What other modules that we have created do we need to import? 
import log; import config; +import qxor; +import curlEngine; +// module variables shared string deviceName; -static this() -{ +static this() { deviceName = Socket.hostName; } -// gives a new name to the specified file or directory -void safeRename(const(char)[] path) -{ +// Creates a safe backup of the given item, and only performs the function if not in a --dry-run scenario +void safeBackup(const(char)[] path, bool dryRun) { auto ext = extension(path); auto newPath = path.chomp(ext) ~ "-" ~ deviceName; if (exists(newPath ~ ext)) { @@ -41,18 +49,55 @@ void safeRename(const(char)[] path) newPath = newPath2; } newPath ~= ext; - rename(path, newPath); + + // Perform the backup + log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath); + if (!dryRun) { + rename(path, newPath); + } else { + log.vdebug("DRY-RUN: Skipping local file backup"); + } +} + +// Rename the given item, and only performs the function if not in a --dry-run scenario +void safeRename(const(char)[] oldPath, const(char)[] newPath, bool dryRun) { + // Perform the rename + if (!dryRun) { + log.vdebug("Calling rename(oldPath, newPath)"); + // rename physical path on disk + rename(oldPath, newPath); + } else { + log.vdebug("DRY-RUN: Skipping local file rename"); + } } // deletes the specified file without throwing an exception if it does not exists -void safeRemove(const(char)[] path) -{ +void safeRemove(const(char)[] path) { if (exists(path)) remove(path); } +// returns the CRC32 hex string of a file +string computeCRC32(string path) { + CRC32 crc; + auto file = File(path, "rb"); + foreach (ubyte[] data; chunks(file, 4096)) { + crc.put(data); + } + return crc.finish().toHexString().dup; +} + +// returns the SHA1 hash hex string of a file +string computeSha1Hash(string path) { + SHA1 sha; + auto file = File(path, "rb"); + foreach (ubyte[] data; chunks(file, 4096)) { + sha.put(data); + } + return 
sha.finish().toHexString().dup; +} + // returns the quickXorHash base64 string of a file -string computeQuickXorHash(string path) -{ +string computeQuickXorHash(string path) { QuickXor qxor; auto file = File(path, "rb"); foreach (ubyte[] data; chunks(file, 4096)) { @@ -72,8 +117,7 @@ string computeSHA256Hash(string path) { } // converts wildcards (*, ?) to regex -Regex!char wild2regex(const(char)[] pattern) -{ +Regex!char wild2regex(const(char)[] pattern) { string str; str.reserve(pattern.length + 2); str ~= "^"; @@ -115,53 +159,93 @@ Regex!char wild2regex(const(char)[] pattern) return regex(str, "i"); } -// returns true if the network connection is available -bool testNetwork(Config cfg) -{ - // Use low level HTTP struct - auto http = HTTP(); - http.url = "https://login.microsoftonline.com"; - // DNS lookup timeout - http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout"))); - // Timeout for connecting - http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout"))); - // Data Timeout for HTTPS connections - http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout"))); - // maximum time any operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. 
- http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout"))); - // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 - http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only +// Test Internet access to Microsoft OneDrive +bool testInternetReachability(ApplicationConfig appConfig) { + // Use preconfigured object with all the correct http values assigned + auto curlEngine = new CurlEngine(); + curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version")); + // Configure the remaining items required + // URL to use + curlEngine.http.url = "https://login.microsoftonline.com"; // HTTP connection test method - http.method = HTTP.Method.head; + curlEngine.http.method = HTTP.Method.head; // Attempt to contact the Microsoft Online Service try { - log.vdebug("Attempting to contact online service"); - http.perform(); - log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Online Service"); - http.shutdown(); + log.vdebug("Attempting to contact Microsoft OneDrive Login Service"); + curlEngine.http.perform(); + log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Login Service"); + curlEngine.http.shutdown(); + // Free object and memory + object.destroy(curlEngine); return true; } catch (SocketException e) { // Socket issue log.vdebug("HTTP Socket Issue"); - log.error("Cannot connect to Microsoft OneDrive Service - Socket Issue"); + log.error("Cannot connect to Microsoft OneDrive Login Service - Socket Issue"); displayOneDriveErrorMessage(e.msg, 
getFunctionName!({})); return false; } catch (CurlException e) { // No network connection to OneDrive Service log.vdebug("No Network Connection"); - log.error("Cannot connect to Microsoft OneDrive Service - Network Connection Issue"); + log.error("Cannot connect to Microsoft OneDrive Login Service - Network Connection Issue"); displayOneDriveErrorMessage(e.msg, getFunctionName!({})); return false; } } +// Retry Internet access test to Microsoft OneDrive +bool retryInternetConnectivtyTest(ApplicationConfig appConfig) { + // re-try network connection to OneDrive + // https://github.com/abraunegg/onedrive/issues/1184 + // Back off & retry with incremental delay + int retryCount = 10000; + int retryAttempts = 1; + int backoffInterval = 1; + int maxBackoffInterval = 3600; + bool onlineRetry = false; + bool retrySuccess = false; + while (!retrySuccess){ + // retry to access OneDrive API + backoffInterval++; + int thisBackOffInterval = retryAttempts*backoffInterval; + log.vdebug(" Retry Attempt: ", retryAttempts); + if (thisBackOffInterval <= maxBackoffInterval) { + log.vdebug(" Retry In (seconds): ", thisBackOffInterval); + Thread.sleep(dur!"seconds"(thisBackOffInterval)); + } else { + log.vdebug(" Retry In (seconds): ", maxBackoffInterval); + Thread.sleep(dur!"seconds"(maxBackoffInterval)); + } + // perform the re-rty + onlineRetry = testInternetReachability(appConfig); + if (onlineRetry) { + // We are now online + log.log("Internet connectivity to Microsoft OneDrive service has been restored"); + retrySuccess = true; + } else { + // We are still offline + if (retryAttempts == retryCount) { + // we have attempted to re-connect X number of times + // false set this to true to break out of while loop + retrySuccess = true; + } + } + // Increment & loop around + retryAttempts++; + } + if (!onlineRetry) { + // Not online after 1.2 years of trying + log.error("ERROR: Was unable to reconnect to the Microsoft OneDrive service after 10000 attempts lasting over 1.2 years!"); + } 
+ // return the state + return onlineRetry; +} + // Can we read the file - as a permissions issue or file corruption will cause a failure // https://github.com/abraunegg/onedrive/issues/113 // returns true if file can be accessed -bool readLocalFile(string path) -{ +bool readLocalFile(string path) { try { // attempt to read up to the first 1 byte of the file // validates we can 'read' the file based on file permissions @@ -175,8 +259,7 @@ bool readLocalFile(string path) } // calls globMatch for each string in pattern separated by '|' -bool multiGlobMatch(const(char)[] path, const(char)[] pattern) -{ +bool multiGlobMatch(const(char)[] path, const(char)[] pattern) { foreach (glob; pattern.split('|')) { if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) { return true; @@ -185,8 +268,7 @@ bool multiGlobMatch(const(char)[] path, const(char)[] pattern) return false; } -bool isValidName(string path) -{ +bool isValidName(string path) { // Restriction and limitations about windows naming files // https://msdn.microsoft.com/en-us/library/aa365247 // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders @@ -223,8 +305,7 @@ bool isValidName(string path) return matched; } -bool containsBadWhiteSpace(string path) -{ +bool containsBadWhiteSpace(string path) { // allow root item if (path == ".") { return true; @@ -248,8 +329,7 @@ bool containsBadWhiteSpace(string path) return m.empty; } -bool containsASCIIHTMLCodes(string path) -{ +bool containsASCIIHTMLCodes(string path) { // https://github.com/abraunegg/onedrive/issues/151 // If a filename contains ASCII HTML codes, regardless of if it gets encoded, it generates an error // Check if the filename contains an ASCII HTML code sequence @@ -265,17 +345,13 @@ bool containsASCIIHTMLCodes(string path) } // Parse and display error message received from OneDrive -void displayOneDriveErrorMessage(string message, string callingFunction) -{ +void displayOneDriveErrorMessage(string 
message, string callingFunction) { writeln(); log.error("ERROR: Microsoft OneDrive API returned an error with the following message:"); auto errorArray = splitLines(message); log.error(" Error Message: ", errorArray[0]); // Extract 'message' as the reason JSONValue errorMessage = parseJSON(replace(message, errorArray[0], "")); - // extra debug - log.vdebug("Raw Error Data: ", message); - log.vdebug("JSON Message: ", errorMessage); // What is the reason for the error if (errorMessage.type() == JSONType.object) { @@ -332,12 +408,48 @@ void displayOneDriveErrorMessage(string message, string callingFunction) } // Where in the code was this error generated - log.vlog(" Calling Function: ", callingFunction); + log.log(" Calling Function: ", callingFunction); + + // Extra Debug if we are using --verbose --verbose + log.vdebug("Raw Error Data: ", message); + log.vdebug("JSON Message: ", errorMessage); +} + +// Common code for handling when a client is unauthorised +void handleClientUnauthorised(int httpStatusCode, string message) { + // Split the lines of the error message + auto errorArray = splitLines(message); + // Extract 'message' as the reason + JSONValue errorMessage = parseJSON(replace(message, errorArray[0], "")); + log.vdebug("errorMessage: ", errorMessage); + + if (httpStatusCode == 400) { + // bad request or a new auth token is needed + // configure the error reason + writeln(); + string[] errorReason = splitLines(errorMessage["error_description"].str); + log.errorAndNotify(errorReason[0]); + writeln(); + log.errorAndNotify("ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token."); + writeln(); + } + + if (httpStatusCode == 401) { + + writeln("CODING TO DO: Triggered a 401 HTTP unauthorised response when client was unauthorised"); + + writeln(); + log.errorAndNotify("ERROR: Check your configuration as your refresh_token may be empty or invalid. 
You may need to issue a --reauth and re-authorise this client."); + writeln(); + + } + + // Must exit here + exit(EXIT_FAILURE); } // Parse and display error message received from the local file system -void displayFileSystemErrorMessage(string message, string callingFunction) -{ +void displayFileSystemErrorMessage(string message, string callingFunction) { writeln(); log.error("ERROR: The local file system returned an error with the following message:"); auto errorArray = splitLines(message); @@ -349,10 +461,17 @@ void displayFileSystemErrorMessage(string message, string callingFunction) ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); if (localActualFreeSpace == 0) { // force exit - exit(-1); + exit(EXIT_FAILURE); } } +// Display the POSIX Error Message +void displayPosixErrorMessage(string message) { + writeln(); + log.error("ERROR: Microsoft OneDrive API returned data that highlights a POSIX compliance issue:"); + log.error(" Error Message: ", message); +} + // Get the function name that is being called to assist with identifying where an error is being generated string getFunctionName(alias func)() { return __traits(identifier, __traits(parent, func)) ~ "()\n"; @@ -527,7 +646,7 @@ void checkApplicationVersion() { thisVersionReleaseGracePeriod = thisVersionReleaseGracePeriod.add!"months"(1); log.vdebug("thisVersionReleaseGracePeriod: ", thisVersionReleaseGracePeriod); - // is this running version obsolete ? + // Is this running version obsolete ? if (!displayObsolete) { // if releaseGracePeriod > currentTime // display an information warning that there is a new release available @@ -556,54 +675,110 @@ void checkApplicationVersion() { } } -// Unit Tests -unittest -{ - assert(multiGlobMatch(".hidden", ".*")); - assert(multiGlobMatch(".hidden", "file|.*")); - assert(!multiGlobMatch("foo.bar", "foo|bar")); - // that should detect invalid file/directory name. 
- assert(isValidName(".")); - assert(isValidName("./general.file")); - assert(!isValidName("./ leading_white_space")); - assert(!isValidName("./trailing_white_space ")); - assert(!isValidName("./trailing_dot.")); - assert(!isValidName("./includesin the path")); - assert(!isValidName("./includes:in the path")); - assert(!isValidName(`./includes"in the path`)); - assert(!isValidName("./includes|in the path")); - assert(!isValidName("./includes?in the path")); - assert(!isValidName("./includes*in the path")); - assert(!isValidName("./includes / in the path")); - assert(!isValidName(`./includes\ in the path`)); - assert(!isValidName(`./includes\\ in the path`)); - assert(!isValidName(`./includes\\\\ in the path`)); - assert(!isValidName("./includes\\ in the path")); - assert(!isValidName("./includes\\\\ in the path")); - assert(!isValidName("./CON")); - assert(!isValidName("./CON.text")); - assert(!isValidName("./PRN")); - assert(!isValidName("./AUX")); - assert(!isValidName("./NUL")); - assert(!isValidName("./COM0")); - assert(!isValidName("./COM1")); - assert(!isValidName("./COM2")); - assert(!isValidName("./COM3")); - assert(!isValidName("./COM4")); - assert(!isValidName("./COM5")); - assert(!isValidName("./COM6")); - assert(!isValidName("./COM7")); - assert(!isValidName("./COM8")); - assert(!isValidName("./COM9")); - assert(!isValidName("./LPT0")); - assert(!isValidName("./LPT1")); - assert(!isValidName("./LPT2")); - assert(!isValidName("./LPT3")); - assert(!isValidName("./LPT4")); - assert(!isValidName("./LPT5")); - assert(!isValidName("./LPT6")); - assert(!isValidName("./LPT7")); - assert(!isValidName("./LPT8")); - assert(!isValidName("./LPT9")); +bool hasId(JSONValue item) { + return ("id" in item) != null; } + +bool hasQuota(JSONValue item) { + return ("quota" in item) != null; +} + +bool isItemDeleted(JSONValue item) { + return ("deleted" in item) != null; +} + +bool isItemRoot(JSONValue item) { + return ("root" in item) != null; +} + +bool 
hasParentReference(const ref JSONValue item) { + return ("parentReference" in item) != null; +} + +bool hasParentReferenceId(JSONValue item) { + return ("id" in item["parentReference"]) != null; +} + +bool hasParentReferencePath(JSONValue item) { + return ("path" in item["parentReference"]) != null; +} + +bool isFolderItem(const ref JSONValue item) { + return ("folder" in item) != null; +} + +bool isFileItem(const ref JSONValue item) { + return ("file" in item) != null; +} + +bool isItemRemote(const ref JSONValue item) { + return ("remoteItem" in item) != null; +} + +bool isItemFile(const ref JSONValue item) { + return ("file" in item) != null; +} + +bool isItemFolder(const ref JSONValue item) { + return ("folder" in item) != null; +} + +bool hasFileSize(const ref JSONValue item) { + return ("size" in item) != null; +} + +bool isDotFile(const(string) path) { + // always allow the root + if (path == ".") return false; + auto paths = pathSplitter(buildNormalizedPath(path)); + foreach(base; paths) { + if (startsWith(base, ".")){ + return true; + } + } + return false; +} + +bool isMalware(const ref JSONValue item) { + return ("malware" in item) != null; +} + +bool hasHashes(const ref JSONValue item) { + return ("hashes" in item["file"]) != null; +} + +bool hasQuickXorHash(const ref JSONValue item) { + return ("quickXorHash" in item["file"]["hashes"]) != null; +} + +bool hasSHA256Hash(const ref JSONValue item) { + return ("sha256Hash" in item["file"]["hashes"]) != null; +} + +bool isMicrosoftOneNoteMimeType1(const ref JSONValue item) { + return (item["file"]["mimeType"].str) == "application/msonenote"; +} + +bool isMicrosoftOneNoteMimeType2(const ref JSONValue item) { + return (item["file"]["mimeType"].str) == "application/octet-stream"; +} + +bool hasUploadURL(const ref JSONValue item) { + return ("uploadUrl" in item) != null; +} + +bool hasNextExpectedRanges(const ref JSONValue item) { + return ("nextExpectedRanges" in item) != null; +} + +bool hasLocalPath(const ref 
JSONValue item) { + return ("localPath" in item) != null; +} + +bool hasETag(const ref JSONValue item) { + return ("eTag" in item) != null; +} + +bool hasSharedElement(const ref JSONValue item) { + return ("shared" in item) != null; +} \ No newline at end of file