From e1fd3ea3d6c877963799b260ab55b741cf97b4e5 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 15 Mar 2019 16:27:22 -0400 Subject: [PATCH 01/44] Release version 1.6.0 --- CHANGELOG.md | 10 ++++++++++ README.rst | 12 ++++++++---- fortls/__init__.py | 2 +- setup.py | 4 ++-- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bedc6b8..dc968a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +## 1.6.0 + +### Improvements +* Add support for EXTERNAL subroutines +* Diagnostics: Missing subroutine/function arguments and argument declarations +* Diagnostics: Unimplemented deferred type-bound procedures +* Diagnostics: Unknown TYPE/KIND objects (only if candidate is visible in workspace) +* Diagnostics: IMPORT statements (missing objects and placement) +* Diagnostics: Basic handling for IMPLICIT statements + ## 1.5.1 ### Improvements diff --git a/README.rst b/README.rst index 618f56e..166d0f2 100644 --- a/README.rst +++ b/README.rst @@ -34,12 +34,16 @@ Language Server Features - Documentation parsing (`Doxygen `_ and `FORD `_ styles) - Diagnostics (limited) - - Multiple use of the same variable name - - Unknown module in USE statement - - Variable masking definition from parent scope + - Multiple definitions with the same variable name + - Variable definition masks definition from parent scope + - Missing subroutine/function arguments + - Unknown user-defined type used in "TYPE"/"CLASS" definition (only if visible in project) - Unclosed blocks/scopes - Invalid scope nesting - - Contains statement errors + - Unknown modules in "USE" statement + - Unimplemented deferred type-bound procedures + - Use of unimported variables/objects in interface blocks + - Statement placement errors ("CONTAINS", "IMPLICIT", "IMPORT") **Notes/Limitations:** diff --git a/fortls/__init__.py b/fortls/__init__.py index 2530ea7..4ce63ce 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -6,7 +6,7 @@ from .langserver import LangServer, read_file_split from .jsonrpc import JSONRPC2Connection, ReadWriter, path_from_uri from .parse_fortran import process_file, detect_fixed_format -__version__ = '1.5.1' +__version__ = '1.6.0' def error_exit(error_str): diff --git a/setup.py b/setup.py index c50bee3..308ed1b 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version='1.5.1', + version='1.6.0', description='FORTRAN Language Server for the Language Server Protocol', @@ -17,7 +17,7 @@ # The project's main homepage. 
url='https://github.com/hansec/fortran-language-server', - download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.5.1.tar.gz', + download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.6.0.tar.gz', author='Chris Hansen', author_email = 'hansec@uw.edu', From 2b3d5b1726d58effa7d59d397eddb939e79195a0 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 15 Mar 2019 09:04:14 -0400 Subject: [PATCH 02/44] Add initial support for "textDocument/codeAction" requests - Automatically generate unimplemented deferred procedures --- fortls/__init__.py | 41 +++++++++++- fortls/langserver.py | 18 ++++++ fortls/objects.py | 145 +++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 196 insertions(+), 8 deletions(-) diff --git a/fortls/__init__.py b/fortls/__init__.py index 4ce63ce..aaafd71 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -51,6 +51,10 @@ def main(): '--preserve_keyword_order', action="store_true", help="Display variable keywords information in original order (default: sort to consistent ordering)" ) + parser.add_argument( + '--enable_code_actions', action="store_true", + help="Enable experimental code actions (default: false)" + ) parser.add_argument( '--debug_log', action="store_true", help="Generate debug log in project root folder" @@ -100,6 +104,10 @@ def main(): '--debug_rename', type=str, help="Test rename request for specified file and position" ) + group.add_argument( + '--debug_actions', action="store_true", + help="Test codeAction request for specified file and position" + ) group.add_argument( '--debug_filepath', type=str, help="File path for language server tests" @@ -124,7 +132,7 @@ def main(): or args.debug_completion or args.debug_signature or args.debug_definition or args.debug_hover or args.debug_implementation or args.debug_references - or (args.debug_rename is not None) + or (args.debug_rename is not None) or args.debug_actions or (args.debug_rootpath is not None) or (args.debug_workspace_symbols is not None)) # @@ -135,7 +143,8 @@ def main(): "lowercase_intrinsics": args.lowercase_intrinsics, "use_signature_help": args.use_signature_help, "variable_hover": args.variable_hover, - "sort_keywords": (not args.preserve_keyword_order) + "sort_keywords": (not args.preserve_keyword_order), + "enable_code_actions": (args.enable_code_actions or args.debug_actions) } # if args.debug_parser: @@ -451,8 +460,34 @@ def main(): print(' + {0}'.format(line)) print() else: - print('Unknown file: "{0}"'.fromat(path)) + print('Unknown file: "{0}"'.format(path)) print('=======') + # + if args.debug_actions: + import pprint + pp = pprint.PrettyPrinter(indent=2, width=120) + print('\nTesting "textDocument/getActions" request:') + check_request_params(args) + s.serve_onSave({ + "params": { + "textDocument": {"uri": args.debug_filepath} + } + }) + action_results = s.serve_codeActions({ + "params": { + "textDocument": {"uri": args.debug_filepath}, + "range": { + "start": {"line": args.debug_line-1, "character": args.debug_char-1}, + "end": {"line": args.debug_line-1, "character": args.debug_char-1} + } + } + }) + for result in action_results: + print("Kind = '{0}', Title = '{1}'".format(result['kind'], result['title'])) + for editUri, editChange in result['edit']['changes'].items(): + print("\nChange: URI = '{0}'".format(editUri)) + pp.pprint(editChange) + print() tmpout.close() tmpin.close() # diff --git a/fortls/langserver.py b/fortls/langserver.py index f4112ee..8b9d286 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ 
-346,6 +346,7 @@ def __init__(self, conn, debug_log=False, settings={}): self.use_signature_help = settings.get("use_signature_help", False) self.variable_hover = settings.get("variable_hover", False) self.sort_keywords = settings.get("sort_keywords", True) + self.enable_code_actions = settings.get("enable_code_actions", False) # Set object settings set_keyword_ordering(self.sort_keywords) @@ -390,6 +391,7 @@ def noop(request): "textDocument/didSave": self.serve_onSave, "textDocument/didClose": self.serve_onClose, "textDocument/didChange": self.serve_onChange, + "textDocument/codeAction": self.serve_codeActions, "initialized": noop, "workspace/didChangeWatchedFiles": noop, "workspace/symbol": self.serve_workspace_symbol, @@ -511,6 +513,8 @@ def serve_initialize(self, request): server_capabilities["signatureHelpProvider"] = { "triggerCharacters": ["(", ","] } + if self.enable_code_actions: + server_capabilities["codeActionProvider"] = True return {"capabilities": server_capabilities} # "workspaceSymbolProvider": True, # "streaming": False, @@ -1279,6 +1283,20 @@ def serve_rename(self, request): }) return {"changes": changes} + def serve_codeActions(self, request): + params = request["params"] + uri = params["textDocument"]["uri"] + sline = params["range"]["start"]["line"] + eline = params["range"]["end"]["line"] + path = path_from_uri(uri) + # Find object + if path in self.workspace: + file_obj = self.workspace[path]["ast"] + curr_scope = file_obj.get_inner_scope(sline) + if curr_scope is not None: + return curr_scope.get_actions(sline, eline) + return None + def send_diagnostics(self, uri): diag_results, diag_exp = self.get_diagnostics(uri) if diag_results is not None: diff --git a/fortls/objects.py b/fortls/objects.py index 6d8a8f7..d0214b7 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -285,6 +285,9 @@ def get_hover(self, long=False, include_doc=True, drop_arg=-1): def get_signature(self, drop_arg=-1): return None, None, None + def get_interface(self, name_replace=None, drop_arg=-1, change_strings=None): + return None + def get_children(self, public_only=False): return [] @@ -303,6 +306,9 @@ def get_implicit(self): return self.implicit_vars return parent_implicit + def get_actions(self, sline, eline): + return None + def is_optional(self): return False @@ -493,6 +499,26 @@ def check_use(self, obj_tree, file_contents): )) return errors + def add_subroutine(self, interface_string, no_contains=False): + edits = [] + line_number = self.eline - 1 + if (self.contains_start is None) and (not no_contains): + edits.append({ + "range": { + "start": {"line": line_number, "character": 0}, + "end": {"line": line_number, "character": 0} + }, + "newText": "CONTAINS\n" + }) + edits.append({ + "range": { + "start": {"line": line_number, "character": 0}, + "end": {"line": line_number, "character": 0} + }, + "newText": interface_string + "\n" + }) + return self.file.path, edits + class fortran_module(fortran_scope): def get_type(self): @@ -692,6 +718,27 @@ def get_signature(self, drop_arg=-1): call_sig, _ = self.get_snippet() return call_sig, self.get_documentation(), arg_sigs + def get_interface(self, name_replace=None, change_arg=-1, change_strings=None): + sub_sig, _ = self.get_snippet(name_replace=name_replace) + keyword_list = get_keywords(self.keywords) + keyword_list.append("SUBROUTINE ") + interface_array = [" ".join(keyword_list) + sub_sig] + for (i, arg_obj) in enumerate(self.arg_objs): + if arg_obj is None: + return None + arg_doc, _ = arg_obj.get_hover(include_doc=False) + if i == 
change_arg: + i0 = arg_doc.lower().find(change_strings[0].lower()) + if i0 >= 0: + i1 = i0 + len(change_strings[0]) + arg_doc = arg_doc[:i0] + change_strings[1] + arg_doc[i1:] + interface_array.append("{0} :: {1}".format(arg_doc, arg_obj.name)) + name = self.name + if name_replace is not None: + name = name_replace + interface_array.append("END SUBROUTINE {0}".format(name)) + return "\n".join(interface_array) + def check_valid_parent(self): if self.parent is not None: parent_type = self.parent.get_type() @@ -805,6 +852,35 @@ def get_hover(self, long=False, include_doc=True, drop_arg=-1): hover_array += doc_str.splitlines() return "\n ".join(hover_array), long + def get_interface(self, name_replace=None, change_arg=-1, change_strings=None): + fun_sig, _ = self.get_snippet(name_replace=name_replace) + keyword_list = [] + if self.return_type is not None: + keyword_list.append(self.return_type) + if self.result_obj is not None: + fun_sig += " RESULT({0})".format(self.result_obj.name) + keyword_list += get_keywords(self.keywords) + keyword_list.append("FUNCTION ") + interface_array = [" ".join(keyword_list) + fun_sig] + for (i, arg_obj) in enumerate(self.arg_objs): + if arg_obj is None: + return None + arg_doc, _ = arg_obj.get_hover(include_doc=False) + if i == change_arg: + i0 = arg_doc.lower().find(change_strings[0].lower()) + if i0 >= 0: + i1 = i0 + len(change_strings[0]) + arg_doc = arg_doc[:i0] + change_strings[1] + arg_doc[i1:] + interface_array.append("{0} :: {1}".format(arg_doc, arg_obj.name)) + if self.result_obj is not None: + arg_doc, _ = self.result_obj.get_hover(include_doc=False) + interface_array.append("{0} :: {1}".format(arg_doc, self.result_obj.name)) + name = self.name + if name_replace is not None: + name = name_replace + interface_array.append("END FUNCTION {0}".format(name)) + return "\n".join(interface_array) + class fortran_type(fortran_scope): def __init__(self, file_obj, line_number, name, keywords): @@ -871,17 +947,71 @@ def get_diagnostics(self, file_contents): errors = [] for in_child in self.in_children: if in_child.keywords.count(KEYWORD_ID_DICT['deferred']) > 0: - if self.contains_start is None: - line_number = self.eline - 1 - else: - line_number = self.contains_start - 1 errors.append(build_diagnostic( - line_number, 'Deferred procedure "{0}" not implemented'.format(in_child.name), + self.eline - 1, 'Deferred procedure "{0}" not implemented'.format(in_child.name), severity=1, related_path=in_child.file.path, related_line=in_child.sline-1, related_message='Inherited procedure declaration' )) return errors + def get_actions(self, sline, eline): + actions = [] + edits = [] + line_number = self.eline - 1 + if (line_number < sline) or (line_number > eline): + return actions + if self.contains_start is None: + edits.append({ + "range": { + "start": {"line": line_number, "character": 0}, + "end": {"line": line_number, "character": 0} + }, + "newText": "CONTAINS\n" + }) + # + diagnostics = [] + has_edits = False + file_uri = path_to_uri(self.file.path) + for in_child in self.in_children: + if in_child.keywords.count(KEYWORD_ID_DICT['deferred']) > 0: + # Get interface + interface_string = in_child.get_interface( + name_replace=in_child.name, + change_strings=('class({0})'.format(in_child.parent.name), 'CLASS({0})'.format(self.name)) + ) + if interface_string is None: + continue + interface_path, interface_edits = self.parent.add_subroutine(interface_string, no_contains=has_edits) + if interface_path != self.file.path: + continue + edits.append({ + "range": { + "start": 
{"line": line_number, "character": 0}, + "end": {"line": line_number, "character": 0} + }, + "newText": " PROCEDURE :: {0} => {0}\n".format(in_child.name) + }) + edits += interface_edits + diagnostics.append(build_diagnostic( + line_number, 'Deferred procedure "{0}" not implemented'.format(in_child.name), + severity=1, related_path=in_child.file.path, + related_line=in_child.sline-1, related_message='Inherited procedure declaration' + )) + has_edits = True + # + if has_edits: + actions.append({ + "title": "Implement deferred procedures", + "kind": "quickfix", + "edit": { + "changes": { + file_uri: edits + } + }, + "diagnostics": diagnostics + }) + return actions + class fortran_block(fortran_scope): def __init__(self, file_obj, line_number, name): @@ -1259,6 +1389,11 @@ def get_signature(self, drop_arg=-1): return call_sig, self.get_documentation(), arg_sigs return None, None, None + def get_interface(self, name_replace=None, change_arg=-1, change_strings=None): + if self.link_obj is not None: + return self.link_obj.get_interface(name_replace, self.drop_arg, change_strings) + return None + def resolve_link(self, obj_tree): if self.link_name is None: return From 3afb073a9ab38ef469d64e96db2568f399fe6113 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 15 Mar 2019 13:09:53 -0400 Subject: [PATCH 03/44] Parse keywords in subroutine/function definitions --- fortls/objects.py | 23 ++++++++++++++--------- fortls/parse_fortran.py | 16 ++++++++++++---- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index d0214b7..5049adc 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -20,7 +20,10 @@ 'deferred', 'dimension', 'intent', - 'pass' + 'pass', + 'pure', + 'elemental', + 'recursive' ] KEYWORD_ID_DICT = {keyword: ind for (ind, keyword) in enumerate(KEYWORD_LIST)} # Type identifiers @@ -584,8 +587,8 @@ def resolve_link(self, obj_tree): class fortran_subroutine(fortran_scope): - def __init__(self, file_obj, line_number, name, args="", mod_sub=False): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_obj, line_number, name, args="", mod_sub=False, keywords=[]): + self.base_setup(file_obj, line_number, name, keywords=keywords) self.args = args.replace(' ', '').lower() self.args_snip = self.args self.arg_objs = [] @@ -683,7 +686,9 @@ def get_desc(self): def get_hover(self, long=False, include_doc=True, drop_arg=-1): sub_sig, _ = self.get_snippet(drop_arg=drop_arg) - hover_array = ["SUBROUTINE " + sub_sig] + keyword_list = get_keywords(self.keywords) + keyword_list.append("SUBROUTINE ") + hover_array = [" ".join(keyword_list) + sub_sig] doc_str = self.get_documentation() if include_doc and (doc_str is not None): hover_array[0] += "\n" + doc_str @@ -770,8 +775,8 @@ def get_diagnostics(self, file_contents): class fortran_function(fortran_subroutine): def __init__(self, file_obj, line_number, name, args="", - mod_fun=False, return_type=None, result_var=None): - self.base_setup(file_obj, line_number, name) + mod_fun=False, keywords=[], return_type=None, result_var=None): + self.base_setup(file_obj, line_number, name, keywords=keywords) self.args = args.replace(' ', '').lower() self.args_snip = self.args self.arg_objs = [] @@ -782,10 +787,8 @@ def __init__(self, file_obj, line_number, name, args="", self.result_obj = None if return_type is not None: self.return_type = return_type[0] - self.keywords, _ = map_keywords(return_type[1]) else: self.return_type = None - self.keywords = [] def copy_interface(self, copy_source): # Copy 
arguments and returns @@ -837,7 +840,9 @@ def get_hover(self, long=False, include_doc=True, drop_arg=-1): fun_return, _ = self.result_obj.get_hover(include_doc=False) if self.return_type is not None: fun_return = self.return_type - hover_array = ["{0} FUNCTION {1}".format(fun_return, fun_sig)] + keyword_list = get_keywords(self.keywords) + keyword_list.append("FUNCTION") + hover_array = ["{0} {1} {2}".format(fun_return, " ".join(keyword_list), fun_sig)] doc_str = self.get_documentation() if include_doc and (doc_str is not None): hover_array[0] += "\n" + doc_str diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 92b828d..480e688 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -259,9 +259,11 @@ def read_var_def(line, type_word=None, fun_only=False): def read_fun_def(line, return_type=None, mod_fun=False): mod_match = SUB_MOD_REGEX.match(line) mods_found = False + keywords = [] while mod_match is not None: mods_found = True line = line[mod_match.end(0):] + keywords.append(mod_match.group(1)) mod_match = SUB_MOD_REGEX.match(line) if mods_found: tmp_var = read_var_def(line, fun_only=True) @@ -290,13 +292,15 @@ def read_fun_def(line, return_type=None, mod_fun=False): results_match = RESULT_REGEX.match(trailing_line) if results_match is not None: return_var = results_match.group(1).strip().lower() - return 'fun', [name, args, [return_type, return_var], mod_fun] + return 'fun', [name, args, [return_type, return_var], mod_fun, keywords] def read_sub_def(line, mod_sub=False): + keywords = [] mod_match = SUB_MOD_REGEX.match(line) while mod_match is not None: line = line[mod_match.end(0):] + keywords.append(mod_match.group(1)) mod_match = SUB_MOD_REGEX.match(line) sub_match = SUB_REGEX.match(line) if sub_match is None: @@ -314,7 +318,7 @@ def read_sub_def(line, mod_sub=False): word_match = [word for word in word_match] args = ','.join(word_match) trailing_line = trailing_line[paren_match.end(0):] - return 'sub', [name, args, mod_sub] + return 'sub', [name, args, mod_sub, keywords] def read_block_def(line): @@ -1026,13 +1030,17 @@ def replace_vars(line): if(debug): print('{1} !!! PROGRAM statement({0})'.format(line_number, line.strip())) elif obj_type == 'sub': - new_sub = fortran_subroutine(file_obj, line_number, obj[0], args=obj[1], mod_sub=obj[2]) + keywords, _ = map_keywords(obj[3]) + new_sub = fortran_subroutine(file_obj, line_number, obj[0], args=obj[1], mod_sub=obj[2], + keywords=keywords) file_obj.add_scope(new_sub, END_SUB_WORD) if(debug): print('{1} !!! 
SUBROUTINE statement({0})'.format(line_number, line.strip())) elif obj_type == 'fun': + keywords, _ = map_keywords(obj[4]) new_fun = fortran_function(file_obj, line_number, obj[0], args=obj[1], - mod_fun=obj[3], return_type=obj[2][0], result_var=obj[2][1]) + mod_fun=obj[3], keywords=keywords, + return_type=obj[2][0], result_var=obj[2][1]) file_obj.add_scope(new_fun, END_FUN_WORD) if obj[2][0] is not None: new_obj = fortran_var(file_obj, line_number, obj[0], obj[2][0][0], obj[2][0][1]) From 1f559fd5063a235c416000e7c8c3b445f9b4d319 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Wed, 20 Mar 2019 20:35:08 -0400 Subject: [PATCH 04/44] Refactor file manipulation tasks into new class - Separate file manipulation and file AST objects - Refactor common file/source code manipulation tasks - Improve line parenthesis sub-string extraction --- fortls/__init__.py | 30 +- fortls/intrinsics.py | 3 +- fortls/langserver.py | 651 ++++++++--------------------------- fortls/objects.py | 203 +++++++---- fortls/parse_fortran.py | 738 ++++++++++++++++++++++++++++++---------- 5 files changed, 858 insertions(+), 767 deletions(-) diff --git a/fortls/__init__.py b/fortls/__init__.py index aaafd71..4dbbc0e 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -3,9 +3,9 @@ import os import argparse from multiprocessing import freeze_support -from .langserver import LangServer, read_file_split +from .langserver import LangServer from .jsonrpc import JSONRPC2Connection, ReadWriter, path_from_uri -from .parse_fortran import process_file, detect_fixed_format +from .parse_fortran import fortran_file, process_file __version__ = '1.6.0' @@ -171,23 +171,23 @@ def main(): # print('\nTesting parser') print(' File = "{0}"'.format(args.debug_filepath)) - contents_split, err_str = read_file_split(args.debug_filepath) - if contents_split is None: + file_obj = fortran_file(args.debug_filepath) + err_str = file_obj.load_from_disk() + if err_str is not None: error_exit("Reading file failed: {0}".format(err_str)) - fixed_flag = detect_fixed_format(contents_split) - print(' Detected format: {0}'.format("fixed" if fixed_flag else "free")) + print(' Detected format: {0}'.format("fixed" if file_obj.fixed else "free")) print("\n=========\nParser Output\n=========\n") _, file_ext = os.path.splitext(os.path.basename(args.debug_filepath)) if file_ext == file_ext.upper(): - ast_new = process_file(contents_split, True, fixed_format=fixed_flag, debug=True, pp_defs=pp_defs) + file_ast = process_file(file_obj, True, debug=True, pp_defs=pp_defs) else: - ast_new = process_file(contents_split, True, fixed_format=fixed_flag, debug=True) + file_ast = process_file(file_obj, True, debug=True) print("\n=========\nObject Tree\n=========\n") - for obj in ast_new.get_scopes(): + for obj in file_ast.get_scopes(): print("{0}: {1}".format(obj.get_type(), obj.FQSN)) print_children(obj) print("\n=========\nExportable Objects\n=========\n") - for _, obj in ast_new.global_dict.items(): + for _, obj in file_ast.global_dict.items(): print("{0}: {1}".format(obj.get_type(), obj.FQSN)) # elif debug_server: @@ -202,20 +202,20 @@ def main(): if dir_exists is False: error_exit("Specified 'debug_rootpath' does not exist or is not a directory") print('\nTesting "initialize" request:') - print(' Root = "{0}"\n'.format(args.debug_rootpath)) + print(' Root = "{0}"'.format(args.debug_rootpath)) s.serve_initialize({ "params": {"rootPath": args.debug_rootpath} }) if len(s.post_messages) == 0: - print(" Succesful") + print(" Succesful!") else: print(" Succesful with 
errors:") for message in s.post_messages: print(" {0}".format(message[1])) # Print module directories - print(" Found module directories:") - for mod_dir in s.source_dirs: - print(" {0}".format(mod_dir)) + print("\n Source directories:") + for source_dir in s.source_dirs: + print(" {0}".format(source_dir)) # if args.debug_diagnostics: print('\nTesting "textDocument/publishDiagnostics" notification:') diff --git a/fortls/intrinsics.py b/fortls/intrinsics.py index 980aa59..a357f1b 100644 --- a/fortls/intrinsics.py +++ b/fortls/intrinsics.py @@ -1,6 +1,7 @@ import os import json -from fortls.objects import fortran_file, fortran_module, fortran_subroutine, \ +from fortls.parse_fortran import fortran_file +from fortls.objects import fortran_module, fortran_subroutine, \ fortran_function, fortran_type, fortran_var, fortran_obj, map_keywords none_file = fortran_file() lowercase_intrinsics = False diff --git a/fortls/langserver.py b/fortls/langserver.py index 8b9d286..9cfa5aa 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -1,323 +1,57 @@ import logging -import sys import os import traceback import re # Local modules from fortls.jsonrpc import path_to_uri, path_from_uri -from fortls.parse_fortran import process_file, read_use_stmt, read_var_def, \ - detect_fixed_format, detect_comment_start -from fortls.objects import get_paren_substring, find_in_scope, find_in_workspace, \ - get_use_tree, set_keyword_ordering, MODULE_TYPE_ID, SUBROUTINE_TYPE_ID, \ - FUNCTION_TYPE_ID, CLASS_TYPE_ID, INTERFACE_TYPE_ID, SELECT_TYPE_ID -from fortls.intrinsics import get_intrinsic_keywords, load_intrinsics, set_lowercase_intrinsics +from fortls.parse_fortran import fortran_file, process_file, get_paren_level, \ + get_var_stack, climb_type_tree, expand_name, get_line_context +from fortls.objects import find_in_scope, find_in_workspace, get_use_tree, \ + set_keyword_ordering, MODULE_TYPE_ID, SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID, \ + CLASS_TYPE_ID, INTERFACE_TYPE_ID, SELECT_TYPE_ID +from fortls.intrinsics import get_intrinsic_keywords, load_intrinsics, \ + set_lowercase_intrinsics log = logging.getLogger(__name__) -PY3K = sys.version_info >= (3, 0) -if not PY3K: - import io # Global regexes FORTRAN_EXT_REGEX = re.compile(r'^\.F(77|90|95|03|08|OR|PP)?$', re.I) -OBJBREAK_REGEX = re.compile(r'[\/\-(.,+*<>=$: ]', re.I) INT_STMNT_REGEX = re.compile(r'^[ ]*[a-z]*$', re.I) -WORD_REGEX = re.compile(r'[a-z_][a-z0-9_]*', re.I) -CALL_REGEX = re.compile(r'[ ]*CALL[ ]+[a-z0-9_%]*$', re.I) -TYPE_STMNT_REGEX = re.compile(r'[ ]*(TYPE|CLASS)[ ]*(IS)?[ ]*$', re.I) -TYPE_DEF_REGEX = re.compile(r'[ ]*TYPE[, ]+', re.I) -EXTENDS_REGEX = re.compile(r'EXTENDS[ ]*$', re.I) -PROCEDURE_STMNT_REGEX = re.compile(r'[ ]*(PROCEDURE)[ ]*$', re.I) SCOPE_DEF_REGEX = re.compile(r'[ ]*(MODULE|PROGRAM|SUBROUTINE|FUNCTION)[ ]+', re.I) END_REGEX = re.compile(r'[ ]*(END)( |MODULE|PROGRAM|SUBROUTINE|FUNCTION|TYPE|DO|IF|SELECT)?', re.I) -IMPORT_REGEX = re.compile(r'[ ]*IMPORT[ ]+', re.I) -FIXED_CONT_REGEX = re.compile(r'( [\S])') -FREE_OPT_CONT_REGEX = re.compile(r'([ ]*&)') - - -def read_file_split(filepath): - # Read and add file from disk - try: - if PY3K: - with open(filepath, 'r', encoding='utf-8', errors='replace') as fhandle: - contents = re.sub(r'\t', r' ', fhandle.read()) - contents_split = contents.splitlines() - else: - with io.open(filepath, 'r', encoding='utf-8', errors='replace') as fhandle: - contents = re.sub(r'\t', r' ', fhandle.read()) - contents_split = contents.splitlines() - except: - log.error("Could not read/decode file %s", 
filepath, exc_info=True) - return None, 'Could not read/decode file' - else: - return contents_split, None def init_file(filepath, pp_defs): # - contents_split, err_str = read_file_split(filepath) - if contents_split is None: + file_obj = fortran_file(filepath) + err_str = file_obj.load_from_disk() + if err_str is not None: return None, err_str # try: - fixed_flag = detect_fixed_format(contents_split) _, file_ext = os.path.splitext(os.path.basename(filepath)) if file_ext == file_ext.upper(): - ast_new = process_file(contents_split, True, filepath, fixed_flag, pp_defs=pp_defs) + file_ast = process_file(file_obj, True, pp_defs=pp_defs) else: - ast_new = process_file(contents_split, True, filepath, fixed_flag) + file_ast = process_file(file_obj, True) except: log.error("Error while parsing file %s", filepath, exc_info=True) return None, 'Error during parsing' - # Construct new file object and add to workspace - tmp_obj = { - "contents": contents_split, - "ast": ast_new, - "fixed": fixed_flag - } - return tmp_obj, None - - -def tokenize_line(line): - paren_list = [[[-1, len(line)]], []] - level = 1 - in_string = False - string_char = "" - for i, char in enumerate(line): - if in_string: - if char == string_char: - in_string = False - continue - if (char == '(') or (char == '['): - paren_list[level].append([i, len(line)]) - level += 1 - if len(paren_list) < level+1: - paren_list.append([]) - elif (char == ')') or (char == ']'): - paren_list[level-1][-1][1] = i - level -= 1 - elif (char == "'") or (char == '"'): - in_string = True - string_char = char - return paren_split(line, paren_list[:-1]) - - -def paren_split(line, paren_list): - sections = [] - for ilev, level in enumerate(paren_list): - sections.append([]) - for group in level: - i1 = group[0] - i2 = group[1] - tmp_str = "" - i3 = i1 + 1 - ranges = [] - if len(paren_list) > ilev+1: - for lower_group in paren_list[ilev+1]: - if (lower_group[0] > i1) and (lower_group[1] <= i2): - tmp_str += line[i3:lower_group[0]] - ranges.append([i3, lower_group[0]]) - i3 = lower_group[1] + 1 - if i3 < i2: - tmp_str += line[i3:i2] - ranges.append([i3, i2]) - if i3 == len(line): - tmp_str += line[i3:i2] - ranges.append([i3, i2]) - sections[ilev].append([ranges, tmp_str]) - return sections + file_obj.ast = file_ast + return file_obj, None -def get_var_stack(line): - if len(line) == 0: +def get_line_prefix(pre_lines, curr_line, iChar): + # Get full line (and possible continuations) from file + if (curr_line is None) or (iChar > len(curr_line)) or (curr_line[0] == '#'): return None - var_list = tokenize_line(line) - deepest_var = None - final_var = None - final_paren = None - deepest_paren = None - n = len(line) - for var_group in var_list: - for var_tmp in var_group: - for parens in var_tmp[0]: - if n >= parens[0]: - if n <= parens[1]: - final_var = var_tmp[1] - final_paren = parens - break - elif parens[1] == -1: - deepest_var = var_tmp[1] - deepest_paren = parens - if final_var is None: - if deepest_var is not None: - final_var = deepest_var - final_paren = deepest_paren - else: - return None - if final_var.find('%') < 0: - ntail = final_paren[1] - final_paren[0] - # - if ntail == 0: - final_var = '' - elif ntail > 0: - final_var = final_var[len(final_var)-ntail:] - # - if final_var is not None: - final_op_split = OBJBREAK_REGEX.split(final_var) - return final_op_split[-1].split('%') - else: - return None - - -def expand_name(line, char_poss): - for word_match in WORD_REGEX.finditer(line): - if word_match.start(0) <= char_poss and word_match.end(0) >= 
char_poss: - return word_match.group(0) - return '' - - -def climb_type_tree(var_stack, curr_scope, obj_tree): - def get_type_name(var_obj): - type_desc = get_paren_substring(var_obj.get_desc()) - if type_desc is not None: - type_desc = type_desc.strip().lower() - return type_desc - # Find base variable in current scope - type_name = None - type_scope = None - iVar = 0 - var_name = var_stack[iVar].strip().lower() - var_obj = find_in_scope(curr_scope, var_name, obj_tree) - if var_obj is None: + prepend_string = ''.join(pre_lines) + curr_line = prepend_string + curr_line + iChar += len(prepend_string) + line_prefix = curr_line[:iChar].lower() + # Ignore string literals + if (line_prefix.count("'") % 2 == 1) or (line_prefix.count('"') % 2 == 1): return None - else: - type_name = get_type_name(var_obj) - curr_scope = var_obj.parent - # Search for type, then next variable in stack and so on - for _ in range(30): - # Find variable type in available scopes - if type_name is None: - break - type_scope = find_in_scope(curr_scope, type_name, obj_tree) - # Exit if not found - if type_scope is None: - break - curr_scope = type_scope.parent - # Go to next variable in stack and exit if done - iVar += 1 - if iVar == len(var_stack)-1: - break - # Find next variable by name in scope - var_name = var_stack[iVar].strip().lower() - var_obj = find_in_scope(type_scope, var_name, obj_tree) - # Set scope to declaration location if variable is inherited - if var_obj is not None: - curr_scope = var_obj.parent - if (var_obj.parent is not None) and (var_obj.parent.get_type() == CLASS_TYPE_ID): - for in_child in var_obj.parent.in_children: - if (in_child.name.lower() == var_name) and (in_child.parent is not None): - curr_scope = in_child.parent - type_name = get_type_name(var_obj) - else: - break - else: - raise KeyError - return type_scope - - -def get_line(line, character, file_obj): - try: - curr_line = file_obj["contents"][line] - except: - return None, character - # Handle continuation lines - if file_obj["fixed"]: # Fixed format file - tmp_line = file_obj["contents"][line] - char_out = character - prev_line = line-1 - while(prev_line > 0): - if FIXED_CONT_REGEX.match(tmp_line): - tmp_line = file_obj["contents"][prev_line] - curr_line = tmp_line + curr_line[6:] - char_out += len(tmp_line) - 6 - else: - break - prev_line = prev_line - 1 - return curr_line, char_out - else: # Free format file - char_out = character - prev_line = line-1 - opt_cont_match = FREE_OPT_CONT_REGEX.match(curr_line) - if opt_cont_match is not None: - curr_line = curr_line[opt_cont_match.end(0):] - char_out -= opt_cont_match.end(0) - while(prev_line > 0): - tmp_line = file_obj["contents"][prev_line] - tmp_no_comm = tmp_line.split('!')[0] - cont_ind = tmp_no_comm.rfind('&') - opt_cont_match = FREE_OPT_CONT_REGEX.match(tmp_no_comm) - if opt_cont_match is not None: - if cont_ind == opt_cont_match.end(0)-1: - break - tmp_no_comm = tmp_no_comm[opt_cont_match.end(0):] - cont_ind -= opt_cont_match.end(0) - if cont_ind >= 0: - curr_line = tmp_no_comm[:cont_ind] + curr_line - char_out += cont_ind - else: - break - prev_line = prev_line - 1 - return curr_line, char_out - - -def apply_change(contents_split, change): - """Apply a change to the document.""" - text = change.get('text', "") - change_range = change.get('range') - if not PY3K: - text = text.encode('utf-8') - if len(text) == 0: - text_split = [""] - else: - text_split = text.splitlines() - # Check for ending newline - if (text[-1] == "\n") or (text[-1] == "\r"): - text_split.append("") - - if 
change_range is None: - # The whole file has changed - return text_split, -1 - - start_line = change_range['start']['line'] - start_col = change_range['start']['character'] - end_line = change_range['end']['line'] - end_col = change_range['end']['character'] - - # Check for an edit occuring at the very end of the file - if start_line == len(contents_split): - return contents_split + text_split, -1 - - # Check for single line edit - if (start_line == end_line) and (len(text_split) == 1): - prev_line = contents_split[start_line] - contents_split[start_line] = prev_line[:start_col] + text + prev_line[end_col:] - return contents_split, start_line - - # Apply standard change to document - new_contents = [] - for i, line in enumerate(contents_split): - if (i < start_line) or (i > end_line): - new_contents.append(line) - continue - - if i == start_line: - for j, change_line in enumerate(text_split): - if j == 0: - new_contents.append(line[:start_col] + change_line) - else: - new_contents.append(change_line) - - if i == end_line: - new_contents[-1] += line[end_col:] - return new_contents, -1 + return line_prefix class LangServer: @@ -581,13 +315,12 @@ def map_types(type): params = request["params"] uri = params["textDocument"]["uri"] path = path_from_uri(uri) - # Get file AST - if path not in self.workspace: + file_obj = self.workspace.get(path) + if file_obj is None: return [] - file_obj = self.workspace[path]["ast"] # Add scopes to outline view test_output = [] - for scope in file_obj.get_scopes(): + for scope in file_obj.ast.get_scopes(): if (scope.name[0] == "#") or (scope.get_type() == SELECT_TYPE_ID): continue scope_tree = scope.FQSN.split("::") @@ -715,108 +448,42 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False if doc_str is not None: comp_obj["documentation"] = doc_str return comp_obj - - def get_context(line, var_prefix): - line_grouped = tokenize_line(line) - lev1_end = line_grouped[0][0][0][-1][1] - if lev1_end < 0: - lev1_end = len(line) - # Test if variable definition statement - test_match = read_var_def(line) - if test_match is not None: - if test_match[0] == 'var': - if (test_match[1][2] is None) and (lev1_end == len(line)): - return 8, var_prefix, None - return 7, var_prefix, None - # Test if in USE statement - test_match = read_use_stmt(line) - if test_match is not None: - if len(test_match[1][1]) > 0: - return 2, var_prefix, test_match[1][0] - else: - return 1, var_prefix, None - # Test if scope declaration or end statement - if SCOPE_DEF_REGEX.match(line) or END_REGEX.match(line): - return -1, None, None - # Test if import statement - if IMPORT_REGEX.match(line): - return 5, var_prefix, None - # In type-def - type_def = False - if TYPE_DEF_REGEX.match(line) is not None: - type_def = True - # Test if in call statement - if lev1_end == len(line): - if CALL_REGEX.match(line_grouped[0][0][1]) is not None: - return 3, var_prefix, None - # Test if variable definition using type/class or procedure - if (len(line_grouped) >= 2) and (len(line_grouped[1][0][0]) > 0): - lev2_end = line_grouped[1][0][0][-1][1] - if lev2_end < 0: - lev2_end = len(line) - if (lev2_end == len(line) - and line_grouped[1][0][0][-1][0] == lev1_end + 1): - test_str = line_grouped[0][0][1] - if ((TYPE_STMNT_REGEX.match(test_str) is not None) - or (type_def and EXTENDS_REGEX.search(test_str) is not None)): - return 4, var_prefix, None - if PROCEDURE_STMNT_REGEX.match(test_str) is not None: - return 6, var_prefix, None - # Only thing on line? 
- if INT_STMNT_REGEX.match(line) is not None: - return 9, var_prefix, None - # Default context - if type_def: - return -1, var_prefix, None - else: - return 0, var_prefix, None # Get parameters from request req_dict = {"isIncomplete": False, "items": []} params = request["params"] uri = params["textDocument"]["uri"] path = path_from_uri(uri) - if path not in self.workspace: + file_obj = self.workspace.get(path) + if file_obj is None: return req_dict # Check line ac_line = params["position"]["line"] ac_char = params["position"]["character"] # Get full line (and possible continuations) from file - curr_line, ac_char = get_line(ac_line, ac_char, self.workspace[path]) - if curr_line is None: - return req_dict + pre_lines, curr_line, _ = file_obj.get_code_line(ac_line, backward=False, strip_comment=True) + line_prefix = get_line_prefix(pre_lines, curr_line, ac_char) is_member = False try: - line_prefix = curr_line[:ac_char].lower() - # Ignore for comment lines - comm_start = detect_comment_start(line_prefix, self.workspace[path]["fixed"]) - if (comm_start >= 0) or (line_prefix[0] == '#'): - return req_dict - # Ignore string literals - if (line_prefix.count("'") % 2 == 1) or \ - (line_prefix.count('"') % 2 == 1): - return None var_stack = get_var_stack(line_prefix) is_member = (len(var_stack) > 1) var_prefix = var_stack[-1].strip() except: return req_dict # print(var_stack) - file_obj = self.workspace[path]["ast"] item_list = [] - scope_list = file_obj.get_scopes(ac_line+1) + scope_list = file_obj.ast.get_scopes(ac_line+1) # Get context name_only = False public_only = False include_globals = True - line_context, var_prefix, context_info = \ - get_context(line_prefix, var_prefix) - if (line_context < 0) or (var_prefix == '' and not (is_member or line_context == 2)): + line_context, context_info = get_line_context(line_prefix) + if (line_context == 'skip') or (var_prefix == '' and (not is_member)): return req_dict if self.autocomplete_no_prefix: var_prefix = '' # Suggestions for user-defined type members if is_member: - curr_scope = file_obj.get_inner_scope(ac_line+1) + curr_scope = file_obj.ast.get_inner_scope(ac_line+1) type_scope = climb_type_tree(var_stack, curr_scope, self.obj_tree) # Set enclosing type as scope if type_scope is None: @@ -830,8 +497,8 @@ def get_context(line, var_prefix): type_mask = set_type_mask(False) type_mask[1] = True type_mask[4] = True - if line_context == 1: - # Use statement module part (modules only) + if line_context == 'mod_only': + # Module names only (USE statement) for key in self.obj_tree: candidate = self.obj_tree[key][0] if (candidate.get_type() == MODULE_TYPE_ID) and \ @@ -839,8 +506,8 @@ def get_context(line, var_prefix): item_list.append(build_comp(candidate, name_only=True)) req_dict["items"] = item_list return req_dict - elif line_context == 2: - # Use statement only part (module public members only) + elif line_context == 'mod_mems': + # Public module members only (USE ONLY statement) name_only = True mod_name = context_info.lower() if mod_name in self.obj_tree: @@ -850,36 +517,34 @@ def get_context(line, var_prefix): type_mask[4] = False else: return {"isIncomplete": False, "items": []} - elif line_context == 3: - # Filter callables for call statements + elif line_context == 'call': + # Callable objects only ("CALL" statements) req_callable = True - elif line_context == 4: - # Variable definition statement for user-defined type - # (user-defined types only) + elif line_context == 'type_only': + # User-defined types only (variable definitions, select 
clauses) type_mask = set_type_mask(True) type_mask[4] = False - elif line_context == 5: - # Include statement (variables and user-defined types only) + elif line_context == 'import': + # Import statement (variables and user-defined types only) name_only = True type_mask = set_type_mask(True) type_mask[4] = False type_mask[6] = False - elif line_context == 6: - # Variable definition statement for procedure with interface - # (interfaces only) + elif line_context == 'int_only': + # Interfaces only (procedure definitions) abstract_only = True include_globals = False name_only = True type_mask = set_type_mask(True) type_mask[2] = False type_mask[3] = False - elif line_context == 7: - # Variable definition statement (variables only) + elif line_context == 'var_only': + # Variables only (variable definitions) name_only = True type_mask[2] = True type_mask[3] = True - elif line_context == 8: - # Variable definition keywords (variables only) + elif line_context == 'var_key': + # Variable definition keywords only (variable definition) key_context = 0 enc_scope_type = scope_list[-1].get_type() if enc_scope_type == MODULE_TYPE_ID: @@ -893,11 +558,12 @@ def get_context(line, var_prefix): item_list.append(build_comp(candidate)) req_dict["items"] = item_list return req_dict - elif line_context == 9: + elif line_context == 'first': # First word -> default context plus Fortran statements for candidate in get_intrinsic_keywords(self.statements, self.keywords, 0): if candidate.name.lower().startswith(var_prefix): item_list.append(build_comp(candidate)) + # Build completion list for candidate in get_candidates(scope_list, var_prefix, include_globals, public_only, abstract_only): # Skip module names (only valid in USE) candidate_type = candidate.get_type() @@ -922,21 +588,10 @@ def get_context(line, var_prefix): def get_definition(self, def_file, def_line, def_char): # Get full line (and possible continuations) from file - curr_line, def_char = get_line(def_line, def_char, def_file) - if curr_line is None: - return None - # + pre_lines, curr_line, _ = def_file.get_code_line(def_line, forward=False, strip_comment=True) + line_prefix = get_line_prefix(pre_lines, curr_line, def_char) is_member = False try: - line_prefix = curr_line[:def_char].lower() - # Ignore for comment lines - comm_start = detect_comment_start(line_prefix, def_file["fixed"]) - if (comm_start >= 0) or (line_prefix[0] == '#'): - return None - # Ignore string literals - if (line_prefix.count("'") % 2 == 1) or \ - (line_prefix.count('"') % 2 == 1): - return None var_stack = get_var_stack(line_prefix) is_member = (len(var_stack) > 1) def_name = expand_name(curr_line, def_char) @@ -945,8 +600,7 @@ def get_definition(self, def_file, def_line, def_char): # print(var_stack, def_name) if def_name == '': return None - file_obj = def_file["ast"] - curr_scope = file_obj.get_inner_scope(def_line+1) + curr_scope = def_file.ast.get_inner_scope(def_line+1) # Traverse type tree if necessary if is_member: type_scope = climb_type_tree(var_stack, curr_scope, self.obj_tree) @@ -976,22 +630,12 @@ def get_definition(self, def_file, def_line, def_char): def serve_signature(self, request): def get_sub_name(line): - nLine = len(line) - line_grouped = tokenize_line(line) - if len(line_grouped) < 2: - return None, None, None - lowest_level = -1 - for i, level in enumerate(line_grouped): - if level[-1][0][-1][-1] == nLine: - lowest_level = i - if lowest_level > 0: - arg_string = '' - for char_group in line_grouped[lowest_level]: - arg_string += char_group[-1] - return 
line_grouped[lowest_level-1][0][1].strip(), arg_string.split(','), \ - line_grouped[lowest_level-1][0][0][-1][1] - else: + _, sections = get_paren_level(line) + if sections[0][0] <= 1: return None, None, None + arg_string = line[sections[0][0]:sections[-1][1]] + sub_string, sections = get_paren_level(line[:sections[0][0]-1]) + return sub_string.strip(), arg_string.split(','), sections[-1][0] def check_optional(arg, params): opt_split = arg.split("=") @@ -1007,37 +651,27 @@ def check_optional(arg, params): params = request["params"] uri = params["textDocument"]["uri"] path = path_from_uri(uri) - if path not in self.workspace: + file_obj = self.workspace.get(path) + if file_obj is None: return req_dict # Check line sig_line = params["position"]["line"] sig_char = params["position"]["character"] # Get full line (and possible continuations) from file - curr_line, sig_char = get_line(sig_line, sig_char, self.workspace[path]) - if curr_line is None: - return req_dict + pre_lines, curr_line, _ = file_obj.get_code_line(sig_line, backward=False, strip_comment=True) + line_prefix = get_line_prefix(pre_lines, curr_line, sig_char) # Test if scope declaration or end statement if SCOPE_DEF_REGEX.match(curr_line) or END_REGEX.match(curr_line): return req_dict is_member = False try: - line_prefix = curr_line[:sig_char].lower() - # Ignore for comment lines - comm_start = detect_comment_start(line_prefix, self.workspace[path]["fixed"]) - if (comm_start >= 0) or (line_prefix[0] == '#'): - return req_dict - # Ignore string literals - if (line_prefix.count("'") % 2 == 1) or \ - (line_prefix.count('"') % 2 == 1): - return req_dict sub_name, arg_strings, sub_end = get_sub_name(line_prefix) var_stack = get_var_stack(sub_name) is_member = (len(var_stack) > 1) except: return req_dict # - file_obj = self.workspace[path]["ast"] - curr_scope = file_obj.get_inner_scope(sig_line+1) + curr_scope = file_obj.ast.get_inner_scope(sig_line+1) # Traverse type tree if necessary if is_member: type_scope = climb_type_tree(var_stack, curr_scope, self.obj_tree) @@ -1104,12 +738,11 @@ def serve_references(self, request): def_line = params["position"]["line"] def_char = params["position"]["character"] path = path_from_uri(uri) - refs = [] - # Find object - if path in self.workspace: - def_obj = self.get_definition(self.workspace[path], def_line, def_char) - else: + file_obj = self.workspace.get(path) + if file_obj is None: return [] + # Find object + def_obj = self.get_definition(file_obj, def_line, def_char) if def_obj is None: return [] # @@ -1119,8 +752,8 @@ def serve_references(self, request): if def_obj.parent.get_type() == CLASS_TYPE_ID: type_mem = True else: - restrict_file = def_obj.file.path - if restrict_file not in self.workspace: + restrict_file = self.workspace.get(def_obj.file.path) + if restrict_file is None: return [] # Search through all files def_name = def_obj.name.lower() @@ -1129,19 +762,18 @@ def serve_references(self, request): if restrict_file is None: file_set = self.workspace.items() else: - file_set = ((restrict_file, self.workspace.get(restrict_file)), ) + file_set = ((restrict_file.path, restrict_file), ) override_cache = [] + refs = [] for filename, file_obj in sorted(file_set): # Search through file line by line - for (i, line) in enumerate(file_obj["contents"]): + for (i, line) in enumerate(file_obj.contents_split): if len(line) == 0: continue # Skip comment lines - comm_start = detect_comment_start(line, file_obj["fixed"]) - if (comm_start == 0) or (line[0] == '#'): + line = 
file_obj.strip_comment(line) + if (line == '') or (line[0] == '#'): continue - elif comm_start > 0: - line = line[:comm_start] for match in NAME_REGEX.finditer(line): var_def = self.get_definition(file_obj, i, match.start(1)+1) if var_def is not None: @@ -1178,11 +810,11 @@ def serve_definition(self, request): def_line = params["position"]["line"] def_char = params["position"]["character"] path = path_from_uri(uri) - # Find object - if path in self.workspace: - var_obj = self.get_definition(self.workspace[path], def_line, def_char) - else: + file_obj = self.workspace.get(path) + if file_obj is None: return None + # Find object + var_obj = self.get_definition(file_obj, def_line, def_char) if var_obj is None: return None # Construct link reference @@ -1212,11 +844,11 @@ def create_hover(string, highlight): def_line = params["position"]["line"] def_char = params["position"]["character"] path = path_from_uri(uri) - # Find object - if path in self.workspace: - var_obj = self.get_definition(self.workspace[path], def_line, def_char) - else: + file_obj = self.workspace.get(path) + if file_obj is None: return None + # Find object + var_obj = self.get_definition(file_obj, def_line, def_char) if var_obj is None: return None # Construct hover information @@ -1245,11 +877,11 @@ def serve_implementation(self, request): def_line = params["position"]["line"] def_char = params["position"]["character"] path = path_from_uri(uri) - # Find object - if path in self.workspace: - var_obj = self.get_definition(self.workspace[path], def_line, def_char) - else: + file_obj = self.workspace.get(path) + if file_obj is None: return None + # Find object + var_obj = self.get_definition(file_obj, def_line, def_char) if var_obj is None: return None # Construct implementation reference @@ -1289,13 +921,25 @@ def serve_codeActions(self, request): sline = params["range"]["start"]["line"] eline = params["range"]["end"]["line"] path = path_from_uri(uri) + file_obj = self.workspace.get(path) # Find object - if path in self.workspace: - file_obj = self.workspace[path]["ast"] - curr_scope = file_obj.get_inner_scope(sline) - if curr_scope is not None: - return curr_scope.get_actions(sline, eline) - return None + if file_obj is None: + return None + curr_scope = file_obj.ast.get_inner_scope(sline) + if curr_scope is None: + return None + action_list = curr_scope.get_actions(sline, eline) + if action_list is None: + return None + # Convert diagnostics + for action in action_list: + diagnostics = action.get("diagnostics") + if diagnostics is not None: + new_diags = [] + for diagnostic in diagnostics: + new_diags.append(diagnostic.build(file_obj)) + action["diagnostics"] = new_diags + return action_list def send_diagnostics(self, uri): diag_results, diag_exp = self.get_diagnostics(uri) @@ -1315,11 +959,10 @@ def send_diagnostics(self, uri): def get_diagnostics(self, uri): filepath = path_from_uri(uri) - if filepath in self.workspace: - file_obj = self.workspace[filepath]["ast"] - file_contents = self.workspace[filepath]["contents"] + file_obj = self.workspace.get(filepath) + if file_obj is not None: try: - diags = file_obj.check_file(self.obj_tree, file_contents) + diags = file_obj.ast.check_file(self.obj_tree) except Exception as e: return None, e else: @@ -1331,36 +974,32 @@ def serve_onChange(self, request): params = request["params"] uri = params["textDocument"]["uri"] path = path_from_uri(uri) - # Update file contents with changes - if self.sync_type == 1: - file_text = params["contentChanges"][0]["text"] - if not PY3K: - file_text 
= file_text.encode('utf-8') - new_contents = file_text.splitlines() + file_obj = self.workspace.get(path) + if file_obj is None: + self.post_message('Change request failed for unknown file "{0}"'.format(path)) + log.error('Change request failed for unknown file "%s"', path) + return else: - if path in self.workspace: - new_contents = self.workspace[path]["contents"] + # Update file contents with changes + if self.sync_type == 1: + file_obj.apply_change(params["contentChanges"][0]) + else: try: for change in params["contentChanges"]: - old_contents = new_contents - new_contents, _ = apply_change(old_contents, change) + _ = file_obj.apply_change(change) except: self.post_message('Change request failed for file "{0}": Could not apply change'.format(path)) log.error('Change request failed for file "%s": Could not apply change', path, exc_info=True) return - else: - self.post_message('Change request failed for unknown file "{0}"'.format(path)) - log.error('Change request failed for unknown file "%s"', path) - return # Parse newly updated file - err_str = self.update_workspace_file(new_contents, path, update_links=True) + err_str = self.update_workspace_file(path, update_links=True) if err_str is not None: self.post_message('Change request failed for file "{0}": {1}'.format(path, err_str)) return # Update include statements linking to this file - for _, file_obj in self.workspace.items(): - file_obj["ast"].resolve_includes(self.workspace, path=path) - self.workspace[path]["ast"].resolve_includes(self.workspace) + for _, tmp_file in self.workspace.items(): + tmp_file.ast.resolve_includes(self.workspace, path=path) + file_obj.ast.resolve_includes(self.workspace) # Update inheritance (currently only on open/save) # for key in self.obj_tree: # self.obj_tree[key][0].resolve_inherit(self.obj_tree) @@ -1382,8 +1021,9 @@ def serve_onSave(self, request, test_exist=False): return # Update include statements linking to this file for _, file_obj in self.workspace.items(): - file_obj["ast"].resolve_includes(self.workspace, path=filepath) - self.workspace[filepath]["ast"].resolve_includes(self.workspace) + file_obj.ast.resolve_includes(self.workspace, path=filepath) + file_obj = self.workspace.get(filepath) + file_obj.ast.resolve_includes(self.workspace) # Update inheritance for key in self.obj_tree: self.obj_tree[key][0].resolve_inherit(self.obj_tree) @@ -1391,36 +1031,33 @@ def serve_onSave(self, request, test_exist=False): self.send_diagnostics(uri) def add_file(self, filepath): - # Read and add file from disk - contents_split, err_str = read_file_split(filepath) - if contents_split is None: - return err_str - return self.update_workspace_file(contents_split, filepath) + return self.update_workspace_file(filepath, read_file=True) - def update_workspace_file(self, contents_split, filepath, update_links=False): + def update_workspace_file(self, filepath, read_file=False, update_links=False): # Update workspace from file contents and path try: - fixed_flag = detect_fixed_format(contents_split) + file_obj = self.workspace.get(filepath) + if read_file: + if file_obj is None: + file_obj = fortran_file(filepath) + file_obj.load_from_disk() _, file_ext = os.path.splitext(os.path.basename(filepath)) if file_ext == file_ext.upper(): - ast_new = process_file(contents_split, True, filepath, fixed_flag, pp_defs=self.pp_defs) + ast_new = process_file(file_obj, True, pp_defs=self.pp_defs) else: - ast_new = process_file(contents_split, True, filepath, fixed_flag) + ast_new = process_file(file_obj, True) except: 
log.error("Error while parsing file %s", filepath, exc_info=True) return 'Error during parsing' # Error during parsing # Remove old objects from tree - if filepath in self.workspace: - ast_old = self.workspace[filepath]["ast"] + ast_old = file_obj.ast + if ast_old is not None: for key in ast_old.global_dict: self.obj_tree.pop(key, None) - # Construct new file object and add to workspace - tmp_obj = { - "contents": contents_split, - "ast": ast_new, - "fixed": fixed_flag - } - self.workspace[filepath] = tmp_obj + # Add new file to workspace + file_obj.ast = ast_new + if filepath not in self.workspace: + self.workspace[filepath] = file_obj # Add top-level objects to object tree for key, obj in ast_new.global_dict.items(): self.obj_tree[key] = [obj, filepath] @@ -1463,12 +1100,12 @@ def workspace_init(self): continue self.workspace[path] = result_obj[0] # Add top-level objects to object tree - ast_new = self.workspace[path]["ast"] + ast_new = self.workspace[path].ast for key in ast_new.global_dict: self.obj_tree[key] = [ast_new.global_dict[key], path] # Update include statements for _, file_obj in self.workspace.items(): - file_obj["ast"].resolve_includes(self.workspace) + file_obj.ast.resolve_includes(self.workspace) # Update inheritance for key in self.obj_tree: self.obj_tree[key][0].resolve_inherit(self.obj_tree) diff --git a/fortls/objects.py b/fortls/objects.py index 5049adc..ea3fc68 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -200,7 +200,7 @@ def add_children(mod_obj, query): def find_word_in_line(line, word): - i0 = 0 + i0 = -1 for poss_name in WORD_REGEX.finditer(line): if poss_name.group() == word: i0 = poss_name.start() @@ -237,6 +237,58 @@ def build_diagnostic(sline, message, severity=1, eline=None, file_contents=None, return diag +class fortran_diagnostic: + def __init__(self, sline, message, severity=1, find_word=None): + self.sline = sline + self.message = message + self.severity = severity + self.find_word = find_word + self.has_related = False + self.related_path = None + self.related_line = None + self.related_message = None + + def add_related(self, path, line, message): + self.has_related = True + self.related_path = path + self.related_line = line + self.related_message = message + + def build(self, file_obj): + schar = echar = 0 + if self.find_word is not None: + _, curr_line, forward_lines = file_obj.get_code_line(self.sline, backward=False) + schar, echar = find_word_in_line(curr_line.lower(), self.find_word.lower()) + if schar < 0: + for (i, line) in enumerate(forward_lines): + schar, echar = find_word_in_line(line.lower(), self.find_word.lower()) + if schar >= 0: + self.sline += i+1 + break + if schar < 0: + schar = echar = 0 + diag = { + "range": { + "start": {"line": self.sline, "character": schar}, + "end": {"line": self.sline, "character": echar} + }, + "message": self.message, + "severity": self.severity + } + if self.has_related: + diag["relatedInformation"] = [{ + "location": { + "uri": path_to_uri(self.related_path), + "range": { + "start": {"line": self.related_line, "character": 0}, + "end": {"line": self.related_line, "character": 0} + } + }, + "message": self.related_message + }] + return diag + + class fortran_obj: def __init__(self): self.vis = 0 @@ -297,7 +349,7 @@ def get_children(self, public_only=False): def get_ancestors(self): return [] - def get_diagnostics(self, file_contents): + def get_diagnostics(self): return [] def get_implicit(self): @@ -333,7 +385,7 @@ def req_named_end(self): def check_valid_parent(self): return True - def 
check_definition(self, file_contents, obj_tree, known_types={}, import_objs=None): + def check_definition(self, obj_tree, known_types={}, import_objs=None): return None, known_types @@ -421,7 +473,7 @@ def get_children(self, public_only=False): else: return self.children - def check_definitions(self, file_contents, obj_tree): + def check_definitions(self, obj_tree): """Check for definition errors in scope""" FQSN_dict = {} for child in self.children: @@ -444,7 +496,7 @@ def check_definitions(self, file_contents, obj_tree): line_number = child.sline - 1 # Check for type definition in scope def_error, known_types = child.check_definition( - file_contents, obj_tree, known_types=known_types, import_objs=import_objs + obj_tree, known_types=known_types, import_objs=import_objs ) if def_error is not None: errors.append(def_error) @@ -454,12 +506,12 @@ def check_definitions(self, file_contents, obj_tree): # Check other variables in current scope if child.FQSN in FQSN_dict: if line_number > FQSN_dict[child.FQSN]: - errors.append(build_diagnostic( + new_diag = fortran_diagnostic( line_number, message='Variable "{0}" declared twice in scope'.format(child.name), - severity=1, file_contents=file_contents, find_word=child.name, - related_path=self.file.path, related_line=FQSN_dict[child.FQSN], - related_message='First declaration' - )) + severity=1, find_word=child.name + ) + new_diag.add_related(path=self.file.path, line=FQSN_dict[child.FQSN], message='First declaration') + errors.append(new_diag) continue # Check for masking from parent scope in subroutines, functions, and blocks if (self.parent is not None) and \ @@ -469,15 +521,16 @@ def check_definitions(self, file_contents, obj_tree): # Ignore if function return variable if (self.get_type() == FUNCTION_TYPE_ID) and (parent_var.FQSN == self.FQSN): continue - errors.append(build_diagnostic( + new_diag = fortran_diagnostic( line_number, message='Variable "{0}" masks variable in parent scope'.format(child.name), - severity=2, file_contents=file_contents, find_word=child.name, - related_path=parent_var.file.path, related_line=parent_var.sline-1, - related_message='First declaration' - )) + severity=2, find_word=child.name + ) + new_diag.add_related(path=parent_var.file.path, line=parent_var.sline-1, + message='First declaration') + errors.append(new_diag) return errors - def check_use(self, obj_tree, file_contents): + def check_use(self, obj_tree): errors = [] last_use_line = -1 for use_line in self.use: @@ -485,21 +538,23 @@ def check_use(self, obj_tree, file_contents): last_use_line = max(last_use_line, use_line[2]) if use_mod.startswith('#import'): if (self.parent is None) or (self.parent.get_type() != INTERFACE_TYPE_ID): - errors.append(build_diagnostic( - use_line[2]-1, message='IMPORT statement outside of interface', - severity=1 - )) + new_diag = fortran_diagnostic( + use_line[2]-1, message='IMPORT statement outside of interface', severity=1 + ) + errors.append(new_diag) continue if use_mod not in obj_tree: - errors.append(build_diagnostic( + new_diag = fortran_diagnostic( use_line[2]-1, message='Module "{0}" not found in project'.format(use_mod), - severity=3, file_contents=file_contents, find_word=use_mod - )) + severity=3, find_word=use_mod + ) + errors.append(new_diag) if (self.implicit_line is not None) and (last_use_line >= self.implicit_line): - errors.append(build_diagnostic( + new_diag = fortran_diagnostic( self.implicit_line-1, message='USE statements after IMPLICIT statement', - severity=1, file_contents=file_contents, 
find_word='IMPLICIT' - )) + severity=1, find_word='IMPLICIT' + ) + errors.append(new_diag) return errors def add_subroutine(self, interface_string, no_contains=False): @@ -751,14 +806,15 @@ def check_valid_parent(self): return False return True - def get_diagnostics(self, file_contents): + def get_diagnostics(self): errors = [] for missing_obj in self.missing_args: - errors.append(build_diagnostic( + new_diag = fortran_diagnostic( missing_obj.sline-1, 'Variable "{0}" with INTENT keyword not found in argument list'.format(missing_obj.name), - severity=1, file_contents=file_contents, find_word=missing_obj.name - )) + severity=1, find_word=missing_obj.name + ) + errors.append(new_diag) implicit_flag = self.get_implicit() if (implicit_flag is None) or (implicit_flag): return errors @@ -766,10 +822,11 @@ def get_diagnostics(self, file_contents): for (i, arg_obj) in enumerate(self.arg_objs): if arg_obj is None: arg_name = arg_list[i].strip() - errors.append(build_diagnostic( + new_diag = fortran_diagnostic( self.sline-1, 'No matching declaration found for argument "{0}"'.format(arg_name), - severity=1, file_contents=file_contents, find_word=arg_name - )) + severity=1, find_word=arg_name + ) + errors.append(new_diag) return errors @@ -948,15 +1005,17 @@ def check_valid_parent(self): return False return True - def get_diagnostics(self, file_contents): + def get_diagnostics(self): errors = [] for in_child in self.in_children: if in_child.keywords.count(KEYWORD_ID_DICT['deferred']) > 0: - errors.append(build_diagnostic( + new_diag = fortran_diagnostic( self.eline - 1, 'Deferred procedure "{0}" not implemented'.format(in_child.name), - severity=1, related_path=in_child.file.path, - related_line=in_child.sline-1, related_message='Inherited procedure declaration' - )) + severity=1 + ) + new_diag.add_related(path=in_child.file.path, line=in_child.sline-1, + message='Inherited procedure declaration') + errors.append(new_diag) return errors def get_actions(self, sline, eline): @@ -997,15 +1056,17 @@ def get_actions(self, sline, eline): "newText": " PROCEDURE :: {0} => {0}\n".format(in_child.name) }) edits += interface_edits - diagnostics.append(build_diagnostic( + new_diag = fortran_diagnostic( line_number, 'Deferred procedure "{0}" not implemented'.format(in_child.name), - severity=1, related_path=in_child.file.path, - related_line=in_child.sline-1, related_message='Inherited procedure declaration' - )) + severity=1 + ) + new_diag.add_related(path=in_child.file.path, line=in_child.sline-1, + message='Inherited procedure declaration') + diagnostics.append(new_diag) has_edits = True # if has_edits: - actions.append({ + actions = [{ "title": "Implement deferred procedures", "kind": "quickfix", "edit": { @@ -1014,7 +1075,7 @@ def get_actions(self, sline, eline): } }, "diagnostics": diagnostics - }) + }] return actions @@ -1271,7 +1332,7 @@ def is_optional(self): def is_callable(self): return self.callable - def check_definition(self, file_contents, obj_tree, known_types={}, import_objs=None): + def check_definition(self, obj_tree, known_types={}, import_objs=None): # Check for type definition in scope type_match = DEF_KIND_REGEX.match(self.desc) if type_match is not None: @@ -1298,17 +1359,16 @@ def check_definition(self, file_contents, obj_tree, known_types={}, import_objs= if type_info is not None: if type_info[0] == 1: type_def = type_info[1] - out_diag = build_diagnostic( + out_diag = fortran_diagnostic( self.sline-1, message='Object "{0}" not found in scope'.format(desc_obj_name), - severity=1, 
file_contents=file_contents, find_word=desc_obj_name, - related_path=type_def.file.path, related_line=type_def.sline-1, - related_message='Possible object' + severity=1, find_word=desc_obj_name ) + out_diag.add_related(path=type_def.file.path, line=type_def.sline-1, message='Possible object') return out_diag, known_types elif (import_objs is not None) and (desc_obj_name not in import_objs): - out_diag = build_diagnostic( + out_diag = fortran_diagnostic( self.sline-1, message='Object "{0}" not imported in interface'.format(desc_obj_name), - severity=1, file_contents=file_contents, find_word=desc_obj_name + severity=1, find_word=desc_obj_name ) return out_diag, known_types return None, known_types @@ -1419,13 +1479,16 @@ def resolve_link(self, obj_tree): def is_callable(self): return True - def check_definition(self, file_contents, obj_tree, known_types={}, import_objs=None): + def check_definition(self, obj_tree, known_types={}, import_objs=None): return None, known_types -class fortran_file: - def __init__(self, path=None): - self.path = path +class fortran_ast: + def __init__(self, file_obj=None): + self.file = file_obj + self.path = None + if file_obj is not None: + self.path = file_obj.path self.global_dict = {} self.scope_list = [] self.variable_list = [] @@ -1446,12 +1509,14 @@ def __init__(self, path=None): self.pending_doc = None def create_none_scope(self): + """Create empty scope to hold non-module contained items""" if self.none_scope is not None: raise ValueError self.none_scope = fortran_program(self, 1, "main") self.add_scope(self.none_scope, re.compile(r'[ ]*END[ ]*PROGRAM', re.I), exportable=False) def get_enc_scope_name(self): + """Get current enclosing scope name""" if self.current_scope is None: return None return self.current_scope.FQSN @@ -1595,7 +1660,7 @@ def get_object(self, FQSN): return curr_obj def resolve_includes(self, workspace, path=None): - file_dir = os.path.dirname(self.path) + file_dir = os.path.dirname(self.file.path) for include_path in self.include_stmnts: file_path = os.path.normpath(os.path.join(file_dir, include_path[1])) if path is not None: @@ -1604,19 +1669,20 @@ def resolve_includes(self, workspace, path=None): parent_scope = self.get_inner_scope(include_path[0]) added_entities = include_path[2] if file_path in workspace: - include_obj = workspace[file_path]["ast"] - if include_obj.none_scope is not None: - if include_obj.inc_scope is None: - include_obj.inc_scope = include_obj.none_scope + include_file = workspace[file_path] + include_ast = include_file.ast + if include_ast.none_scope is not None: + if include_ast.inc_scope is None: + include_ast.inc_scope = include_ast.none_scope # Remove old objects for obj in added_entities: parent_scope.children.remove(obj) added_entities = [] - for child in include_obj.inc_scope.children: + for child in include_ast.inc_scope.children: added_entities.append(child) parent_scope.add_child(child) child.update_fqsn(parent_scope.FQSN) - include_obj.none_scope = parent_scope + include_ast.none_scope = parent_scope include_path[2] = added_entities def close_file(self, line_number): @@ -1637,7 +1703,7 @@ def close_file(self, line_number): if obj is not None: obj.set_visibility(1) - def check_file(self, obj_tree, file_contents): + def check_file(self, obj_tree): errors = [] tmp_list = self.scope_list[:] if self.none_scope is not None: @@ -1663,7 +1729,10 @@ def check_file(self, obj_tree, file_contents): scope.sline-1, message='Invalid parent for "{0}" declaration'.format(scope.get_desc()), severity=1 )) - errors += 
scope.check_use(obj_tree, file_contents) - errors += scope.check_definitions(file_contents, obj_tree) - errors += scope.get_diagnostics(file_contents) - return errors + errors += scope.check_use(obj_tree) + errors += scope.check_definitions(obj_tree) + errors += scope.get_diagnostics() + diagnostics = [] + for error in errors: + diagnostics.append(error.build(self.file)) + return diagnostics diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 480e688..e75b7ed 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -1,10 +1,15 @@ from __future__ import print_function +import sys import re -from fortls.objects import get_paren_substring, map_keywords, fortran_module, \ - fortran_program, fortran_submodule, fortran_subroutine, fortran_function, \ - fortran_block, fortran_select, fortran_type, fortran_enum, fortran_int, \ - fortran_var, fortran_meth, fortran_associate, fortran_do, fortran_where, \ - fortran_if, fortran_file, INTERFACE_TYPE_ID, SELECT_TYPE_ID +from fortls.objects import get_paren_substring, map_keywords, find_in_scope, \ + fortran_ast, fortran_module, fortran_program, fortran_submodule, \ + fortran_subroutine, fortran_function, fortran_block, fortran_select, \ + fortran_type, fortran_enum, fortran_int, fortran_var, fortran_meth, \ + fortran_associate, fortran_do, fortran_where, fortran_if, \ + INTERFACE_TYPE_ID, SELECT_TYPE_ID, CLASS_TYPE_ID +PY3K = sys.version_info >= (3, 0) +if not PY3K: + import io # Fortran statement matching rules USE_REGEX = re.compile(r'[ ]*USE([, ]+INTRINSIC)?[ :]+([a-z0-9_]*)([, ]+ONLY[ :]+)?', re.I) IMPORT_REGEX = re.compile(r'[ ]*IMPORT[ :]+([a-z_])', re.I) @@ -64,6 +69,7 @@ TATTR_LIST_REGEX = re.compile(r'[ ]*,[ ]*(PUBLIC|PRIVATE|ABSTRACT|EXTENDS\([a-z0-9_]*\))', re.I) VIS_REGEX = re.compile(r'[ ]*(PUBLIC|PRIVATE)', re.I) WORD_REGEX = re.compile(r'[a-z_][a-z0-9_]*', re.I) +OBJBREAK_REGEX = re.compile(r'[\/\-(.,+*<>=$: ]', re.I) SUB_PAREN_MATCH = re.compile(r'\([a-z0-9_, ]*\)', re.I) KIND_SPEC_MATCH = re.compile(r'\([a-z0-9_, =*]*\)', re.I) SQ_STRING_REGEX = re.compile(r'\'[^\']*\'', re.I) @@ -84,6 +90,145 @@ PP_REGEX = re.compile(r'#(if |ifdef|ifndef|else|elif|endif)') PP_DEF_REGEX = re.compile(r'#(define|undef)[ ]*([a-z0-9_]+)', re.I) PP_DEF_TEST_REGEX = re.compile(r'(![ ]*)?defined[ ]*\([ ]*([a-z0-9_]*)[ ]*\)$', re.I) +# Context matching rules +CALL_REGEX = re.compile(r'[ ]*CALL[ ]+[a-z0-9_%]*$', re.I) +INT_STMNT_REGEX = re.compile(r'^[ ]*[a-z]*$', re.I) +TYPE_STMNT_REGEX = re.compile(r'[ ]*(TYPE|CLASS)[ ]*(IS)?[ ]*$', re.I) +PROCEDURE_STMNT_REGEX = re.compile(r'[ ]*(PROCEDURE)[ ]*$', re.I) +SCOPE_DEF_REGEX = re.compile(r'[ ]*(MODULE|PROGRAM|SUBROUTINE|FUNCTION)[ ]+', re.I) +END_REGEX = re.compile(r'[ ]*(END)( |MODULE|PROGRAM|SUBROUTINE|FUNCTION|TYPE|DO|IF|SELECT)?', re.I) + + +def get_var_stack(line): + """Get user-defined type field sequence""" + if len(line) == 0: + return None + final_var, sections = get_paren_level(line) + if final_var == '': + return [''] + if final_var.find('%') < 0: + final_paren = sections[-1] + ntail = final_paren[1] - final_paren[0] + # + if ntail == 0: + final_var = '' + elif ntail > 0: + final_var = final_var[len(final_var)-ntail:] + # + if final_var is not None: + final_op_split = OBJBREAK_REGEX.split(final_var) + return final_op_split[-1].split('%') + else: + return None + + +def expand_name(line, char_poss): + """Get full word containing current position""" + for word_match in WORD_REGEX.finditer(line): + if word_match.start(0) <= char_poss and word_match.end(0) >= char_poss: + return 
word_match.group(0) + return '' + + +def climb_type_tree(var_stack, curr_scope, obj_tree): + """Walk up user-defined type sequence to determine final field type""" + def get_type_name(var_obj): + type_desc = get_paren_substring(var_obj.get_desc()) + if type_desc is not None: + type_desc = type_desc.strip().lower() + return type_desc + # Find base variable in current scope + type_name = None + type_scope = None + iVar = 0 + var_name = var_stack[iVar].strip().lower() + var_obj = find_in_scope(curr_scope, var_name, obj_tree) + if var_obj is None: + return None + else: + type_name = get_type_name(var_obj) + curr_scope = var_obj.parent + # Search for type, then next variable in stack and so on + for _ in range(30): + # Find variable type in available scopes + if type_name is None: + break + type_scope = find_in_scope(curr_scope, type_name, obj_tree) + # Exit if not found + if type_scope is None: + break + curr_scope = type_scope.parent + # Go to next variable in stack and exit if done + iVar += 1 + if iVar == len(var_stack)-1: + break + # Find next variable by name in scope + var_name = var_stack[iVar].strip().lower() + var_obj = find_in_scope(type_scope, var_name, obj_tree) + # Set scope to declaration location if variable is inherited + if var_obj is not None: + curr_scope = var_obj.parent + if (var_obj.parent is not None) and (var_obj.parent.get_type() == CLASS_TYPE_ID): + for in_child in var_obj.parent.in_children: + if (in_child.name.lower() == var_name) and (in_child.parent is not None): + curr_scope = in_child.parent + type_name = get_type_name(var_obj) + else: + break + else: + raise KeyError + return type_scope + + +def get_line_context(line): + """Get context of ending position in line (for completion)""" + last_level, sections = get_paren_level(line) + lev1_end = sections[-1][1] + # Test if variable definition statement + test_match = read_var_def(line) + if test_match is not None: + if test_match[0] == 'var': + if (test_match[1][2] is None) and (lev1_end == len(line)): + return 'var_key', None + return 'var_only', None + # Test if in USE statement + test_match = read_use_stmt(line) + if test_match is not None: + if len(test_match[1][1]) > 0: + return 'mod_mems', test_match[1][0] + else: + return 'mod_only', None + # Test if scope declaration or end statement + if SCOPE_DEF_REGEX.match(line) or END_REGEX.match(line): + return 'skip', None + # Test if import statement + if IMPORT_REGEX.match(line): + return 'import', None + # In type-def + type_def = False + if TYPE_DEF_REGEX.match(line) is not None: + type_def = True + # Test if in call statement + if lev1_end == len(line): + if CALL_REGEX.match(last_level) is not None: + return 'call', None + # Test if variable definition using type/class or procedure + if (len(sections) == 1) and (sections[0][0] >= 1): + # Get string one level up + test_str, _ = get_paren_level(line[:sections[0][0]-1]) + if ((TYPE_STMNT_REGEX.match(test_str) is not None) + or (type_def and EXTENDS_REGEX.search(test_str) is not None)): + return 'type_only', None + if PROCEDURE_STMNT_REGEX.match(test_str) is not None: + return 'int_only', None + # Only thing on line? 
+ if INT_STMNT_REGEX.match(line) is not None: + return 'first', None + # Default context + if type_def: + return 'skip', None + else: + return 'default', None def detect_fixed_format(file_lines): @@ -104,20 +249,8 @@ def detect_fixed_format(file_lines): return True -def detect_comment_start(line, fixed_format=False): - if fixed_format: - if FIXED_OPENMP_MATCH.match(line) is not None: - return -1 - if FIXED_COMMENT_LINE_MATCH.match(line) is not None: - return 0 - else: - if FREE_OPENMP_MATCH.match(line) is not None: - return -1 - return strip_strings(line).find('!') - return -1 - - def strip_line_label(line): + """Strip leading numeric line label""" match = LINE_LABEL_REGEX.match(line) if match is None: return line, None @@ -127,22 +260,24 @@ def strip_line_label(line): return out_str, line_label -def strip_strings(in_str, maintain_len=False): +def strip_strings(in_line, maintain_len=False): + """String string literals from code line""" def repl_sq(m): return "'{0}'".format(' '*(len(m.group())-2)) def repl_dq(m): return '"{0}"'.format(' '*(len(m.group())-2)) if maintain_len: - out_str = SQ_STRING_REGEX.sub(repl_sq, in_str) - out_str = DQ_STRING_REGEX.sub(repl_dq, out_str) + out_line = SQ_STRING_REGEX.sub(repl_sq, in_line) + out_line = DQ_STRING_REGEX.sub(repl_dq, out_line) else: - out_str = SQ_STRING_REGEX.sub('', in_str) - out_str = DQ_STRING_REGEX.sub('', out_str) - return out_str + out_line = SQ_STRING_REGEX.sub('', in_line) + out_line = DQ_STRING_REGEX.sub('', out_line) + return out_line def separate_def_list(test_str): + """Separate definition lists, skipping parenthesis and bracket groups""" stripped_str = strip_strings(test_str) paren_count = 0 def_list = [] @@ -168,6 +303,7 @@ def separate_def_list(test_str): def find_paren_match(test_str): + """Find matching closing parenthesis by searching forward""" paren_count = 1 ind = -1 for (i, char) in enumerate(test_str): @@ -180,7 +316,48 @@ def find_paren_match(test_str): return ind +def get_paren_level(line): + """Get sub-string corresponding to a single parenthesis level, + via backward search up through the line. 
+ """ + if line == '': + return '', [[0, 0]] + level = 0 + in_string = False + string_char = "" + i1 = len(line) + sections = [] + for i in range(len(line)-1, -1, -1): + char = line[i] + if in_string: + if char == string_char: + in_string = False + continue + if (char == '(') or (char == '['): + level -= 1 + if level == 0: + i1 = i + elif level < 0: + sections.append([i+1, i1]) + break + elif (char == ')') or (char == ']'): + level += 1 + if level == 1: + sections.append([i+1, i1]) + elif (char == "'") or (char == '"'): + in_string = True + string_char = char + if level == 0: + sections.append([i, i1]) + sections.reverse() + out_string = "" + for section in sections: + out_string += line[section[0]:section[1]] + return out_string, sections + + def parse_keywords(test_str): + """Parse keywords""" keyword_match = KEYWORD_LIST_REGEX.match(test_str) keywords = [] while (keyword_match is not None): @@ -200,6 +377,7 @@ def parse_keywords(test_str): def read_var_def(line, type_word=None, fun_only=False): + """Attempt to read variable definition line""" if type_word is None: type_match = NAT_VAR_REGEX.match(line) if type_match is None: @@ -257,6 +435,7 @@ def read_var_def(line, type_word=None, fun_only=False): def read_fun_def(line, return_type=None, mod_fun=False): + """Attempt to read FUNCTION definition line""" mod_match = SUB_MOD_REGEX.match(line) mods_found = False keywords = [] @@ -296,6 +475,7 @@ def read_fun_def(line, return_type=None, mod_fun=False): def read_sub_def(line, mod_sub=False): + """Attempt to read SUBROUTINE definition line""" keywords = [] mod_match = SUB_MOD_REGEX.match(line) while mod_match is not None: @@ -322,6 +502,7 @@ def read_sub_def(line, mod_sub=False): def read_block_def(line): + """Attempt to read BLOCK definition line""" block_match = BLOCK_REGEX.match(line) if block_match is not None: name = block_match.group(1) @@ -354,6 +535,7 @@ def read_block_def(line): def read_select_def(line): + """Attempt to read SELECT definition line""" select_match = SELECT_REGEX.match(line) select_desc = None select_binding = None @@ -380,6 +562,7 @@ def read_select_def(line): def read_type_def(line): + """Attempt to read TYPE definition line""" type_match = TYPE_DEF_REGEX.match(line) if type_match is None: return None @@ -421,6 +604,7 @@ def read_type_def(line): def read_enum_def(line): + """Attempt to read ENUM definition line""" enum_match = ENUM_DEF_REGEX.match(line) if enum_match is not None: return 'enum', None @@ -428,6 +612,7 @@ def read_enum_def(line): def read_generic_def(line): + """Attempt to read generic procedure definition line""" generic_match = GENERIC_PRO_REGEX.match(line) if generic_match is None: return None @@ -455,6 +640,7 @@ def read_generic_def(line): def read_mod_def(line): + """Attempt to read MODULE and MODULE PROCEDURE definition lines""" mod_match = MOD_REGEX.match(line) if mod_match is None: return None @@ -483,6 +669,7 @@ def read_mod_def(line): def read_submod_def(line): + """Attempt to read SUBMODULE definition line""" submod_match = SUBMOD_REGEX.match(line) if submod_match is None: return None @@ -506,6 +693,7 @@ def read_submod_def(line): def read_prog_def(line): + """Attempt to read PROGRAM definition line""" prog_match = PROG_REGEX.match(line) if prog_match is None: return None @@ -514,6 +702,7 @@ def read_prog_def(line): def read_int_def(line): + """Attempt to read INTERFACE definition line""" int_match = INT_REGEX.match(line) if int_match is None: return None @@ -528,6 +717,7 @@ def read_int_def(line): def read_use_stmt(line): + """Attempt to 
read USE statement""" import_match = IMPORT_REGEX.match(line) if import_match is not None: trailing_line = line[import_match.end(0)-1:].lower() @@ -546,6 +736,7 @@ def read_use_stmt(line): def read_inc_stmt(line): + """Attempt to read INCLUDE statement""" inc_match = INCLUDE_REGEX.match(line) if inc_match is None: return None @@ -555,6 +746,7 @@ def read_inc_stmt(line): def read_vis_stmnt(line): + """Attempt to read PUBLIC/PRIVATE statement""" vis_match = VIS_REGEX.match(line) if vis_match is None: return None @@ -573,8 +765,216 @@ def read_vis_stmnt(line): read_submod_def, read_inc_stmt, read_vis_stmnt] -def process_file(file_str, close_open_scopes, path=None, fixed_format=False, debug=False, pp_defs=None): - def preprocess(): +class fortran_file: + def __init__(self, path=None): + self.path = path + self.contents_split = [] + self.contents_pp = [] + self.nLines = 0 + self.fixed = False + self.ast = None + + def copy(self): + """Copy content to new file object (does not copy objects)""" + copy_obj = fortran_file(self.path) + copy_obj.fixed = self.fixed + copy_obj.set_contents(self.contents_split) + return copy_obj + + def load_from_disk(self): + """Read file from disk""" + try: + if PY3K: + with open(self.path, 'r', encoding='utf-8', errors='replace') as fhandle: + contents = re.sub(r'\t', r' ', fhandle.read()) + self.contents_split = contents.splitlines() + else: + with io.open(self.path, 'r', encoding='utf-8', errors='replace') as fhandle: + contents = re.sub(r'\t', r' ', fhandle.read()) + self.contents_split = contents.splitlines() + self.fixed = detect_fixed_format(self.contents_split) + self.contents_pp = self.contents_split + self.nLines = len(self.contents_split) + except: + return 'Could not read/decode file' + else: + return None + + def apply_change(self, change): + """Apply a change to the file.""" + text = change.get('text', "") + change_range = change.get('range') + if not PY3K: + text = text.encode('utf-8') + if len(text) == 0: + text_split = [""] + else: + text_split = text.splitlines() + # Check for ending newline + if (text[-1] == "\n") or (text[-1] == "\r"): + text_split.append("") + + if change_range is None: + # The whole file has changed + self.set_contents(text_split) + return -1 + + start_line = change_range['start']['line'] + start_col = change_range['start']['character'] + end_line = change_range['end']['line'] + end_col = change_range['end']['character'] + + # Check for an edit occuring at the very end of the file + if start_line == self.nLines: + self.set_contents(self.contents_split + text_split) + return -1 + + # Check for single line edit + if (start_line == end_line) and (len(text_split) == 1): + prev_line = self.contents_split[start_line] + self.contents_split[start_line] = prev_line[:start_col] + text + prev_line[end_col:] + self.set_contents(self.contents_split, detect_format=False) + return start_line + + # Apply standard change to document + new_contents = [] + for i, line in enumerate(self.contents_split): + if (i < start_line) or (i > end_line): + new_contents.append(line) + continue + + if i == start_line: + for j, change_line in enumerate(text_split): + if j == 0: + new_contents.append(line[:start_col] + change_line) + else: + new_contents.append(change_line) + + if i == end_line: + new_contents[-1] += line[end_col:] + self.set_contents(new_contents) + return -1 + + def set_contents(self, contents_split, detect_format=True): + """Set file contents""" + self.contents_split = contents_split + self.contents_pp = self.contents_split + self.nLines = 
len(self.contents_split) + if detect_format: + self.fixed = detect_fixed_format(self.contents_split) + + def get_line(self, line_number, pp_content=False): + """Get single line from file""" + try: + if pp_content: + return self.contents_pp[line_number] + else: + return self.contents_split[line_number] + except: + return None + + def get_code_line(self, line_number, forward=True, backward=True, pp_content=False, strip_comment=False): + """Get full code line from file including any adjacent continuations""" + curr_line = self.get_line(line_number, pp_content) + if curr_line is None: + return [], None, [] + # Search backward for prefix lines + line_ind = line_number - 1 + pre_lines = [] + if backward: + if self.fixed: # Fixed format file + tmp_line = curr_line + while(line_ind > 0): + if FIXED_CONT_REGEX.match(tmp_line): + prev_line = tmp_line + tmp_line = self.get_line(line_ind, pp_content) + if line_ind == line_number-1: + curr_line = ' '*6 + curr_line[6:] + else: + pre_lines[-1] = ' '*6 + prev_line[6:] + pre_lines.append(tmp_line) + else: + break + line_ind -= 1 + else: # Free format file + opt_cont_match = FREE_CONT_REGEX.match(curr_line) + if opt_cont_match is not None: + curr_line = curr_line[opt_cont_match.end(0):] + while(line_ind > 0): + tmp_line = strip_strings(self.get_line(line_ind, pp_content), maintain_len=True) + tmp_no_comm = tmp_line.split('!')[0] + cont_ind = tmp_no_comm.rfind('&') + opt_cont_match = FREE_CONT_REGEX.match(tmp_no_comm) + if opt_cont_match is not None: + if cont_ind == opt_cont_match.end(0)-1: + break + tmp_no_comm = tmp_no_comm[opt_cont_match.end(0):] + if cont_ind >= 0: + pre_lines.append(tmp_no_comm[:cont_ind]) + else: + break + line_ind -= 1 + # Search forward for trailing lines with continuations + line_ind = line_number + 1 + post_lines = [] + if forward: + if self.fixed: + if line_ind < self.nLines: + next_line = self.get_line(line_ind, pp_content) + line_ind += 1 + cont_match = FIXED_CONT_REGEX.match(next_line) + while((cont_match is not None) and (line_ind < self.nLines)): + post_lines.append(' '*6 + next_line[6:]) + next_line = self.get_line(line_ind, pp_content) + line_ind += 1 + cont_match = FIXED_CONT_REGEX.match(next_line) + else: + line_stripped = strip_strings(curr_line, maintain_len=True) + iAmper = line_stripped.find('&') + iComm = line_stripped.find('!') + if iComm < 0: + iComm = iAmper + 1 + next_line = '' + while ((iAmper >= 0) and (iAmper < iComm)): + if line_ind == line_number + 1: + curr_line = curr_line[:iAmper] + elif next_line != '': + post_lines[-1] = next_line[:iAmper] + next_line = self.get_line(line_ind, pp_content) + line_ind += 1 + # Skip empty or comment lines + match = FREE_COMMENT_LINE_MATCH.match(next_line) + if (next_line.rstrip() == '') or (match is not None): + next_line = '' + post_lines.append('') + continue + cont_match = FREE_CONT_REGEX.match(next_line) + if cont_match is not None: + next_line = ' '*cont_match.end(0) + next_line[cont_match.end(0):] + post_lines.append(next_line) + line_stripped = strip_strings(next_line, maintain_len=True) + iAmper = line_stripped.find('&') + iComm = line_stripped.find('!') + if iComm < 0: + iComm = iAmper + 1 + # Detect start of comment in current line + if strip_comment: + curr_line = self.strip_comment(curr_line) + pre_lines.reverse() + return pre_lines, curr_line, post_lines + + def strip_comment(self, line): + """Strip comment from line""" + if self.fixed: + if (FIXED_COMMENT_LINE_MATCH.match(line) is not None) \ + and (FIXED_OPENMP_MATCH.match(line) is not None): + return '' + 
else: + if FREE_OPENMP_MATCH.match(line) is None: + line = line.split('!')[0] + return line + + def preprocess(self, pp_defs=None, debug=False): # Look for and mark excluded preprocessor paths in file # Initial implementation only looks for "if" and "ifndef" statements. # For "if" statements all blocks are excluded except the "else" block if present @@ -632,7 +1032,7 @@ def replace_vars(line): defs_tmp = pp_defs.copy() output_file = [] def_cont_name = None - for (i, line) in enumerate(file_str): + for (i, line) in enumerate(self.contents_split): # Handle multiline macro continuation if def_cont_name is not None: output_file.append("") @@ -730,62 +1130,69 @@ def replace_vars(line): for def_tmp, value in defs_tmp.items(): if line.find(def_tmp) >= 0: if debug: - print('{1} !!! Macro sub({0}) "{2}" -> "{3}"'.format(i+1, - line.strip(), def_tmp, value)) + print('{1} !!! Macro sub({0}) "{2}" -> "{3}"'.format( + i+1, line.strip(), def_tmp, value + )) line = line.replace(def_tmp, value) output_file.append(line) - return pp_skips, pp_defines, output_file + self.contents_pp = output_file + return pp_skips, pp_defines + + +def process_file(file_obj, close_open_scopes, debug=False, pp_defs=None): + """Build file AST by parsing file""" # - if fixed_format: + if file_obj.fixed: COMMENT_LINE_MATCH = FIXED_COMMENT_LINE_MATCH - CONT_REGEX = FIXED_CONT_REGEX DOC_COMMENT_MATCH = FIXED_DOC_MATCH else: COMMENT_LINE_MATCH = FREE_COMMENT_LINE_MATCH - CONT_REGEX = FREE_CONT_REGEX DOC_COMMENT_MATCH = FREE_DOC_MATCH # - file_obj = fortran_file(path) + file_ast = fortran_ast(file_obj) # if pp_defs is not None: if debug: print("=== PreProc Pass ===\n") - pp_skips, pp_defines, file_str = preprocess() + pp_skips, pp_defines = file_obj.preprocess(pp_defs=pp_defs, debug=debug) for pp_reg in pp_skips: - file_obj.start_ppif(pp_reg[0]) - file_obj.end_ppif(pp_reg[1]) + file_ast.start_ppif(pp_reg[0]) + file_ast.end_ppif(pp_reg[1]) if debug: print("\n=== Parsing Pass ===\n") else: pp_skips = [] pp_defines = [] # - line_number = 0 - next_line_num = 1 + line_ind = 0 + next_line_ind = 0 + line_number = 1 int_counter = 0 block_counter = 0 do_counter = 0 if_counter = 0 select_counter = 0 block_id_stack = [] - next_line = None line_ind = 0 + semi_split = [] doc_string = None - while((line_ind < len(file_str)) or (next_line is not None)): + while((next_line_ind < file_obj.nLines) or (len(semi_split) > 0)): if (doc_string is not None) and (doc_string != ''): - file_obj.add_doc('!! ' + doc_string) + file_ast.add_doc('!! ' + doc_string) if(debug): print('{1} !!! 
Doc string({0})'.format(line_number, doc_string)) doc_string = None # Get next line - if next_line is None: - line = file_str[line_ind] - line_ind += 1 + if len(semi_split) > 0: + line = semi_split[0] + semi_split = semi_split[1:] + get_full = False else: - line = next_line - next_line = None - line_number = next_line_num - next_line_num = line_number + 1 + line_ind = next_line_ind + line_number = line_ind + 1 + line = file_obj.get_line(line_ind, pp_content=True) + next_line_ind = line_ind + 1 + get_full = True if line == '': continue # Skip empty lines # Skip comment lines @@ -799,24 +1206,24 @@ def replace_vars(line): doc_forward = True else: doc_forward = False - if line_ind < len(file_str): - next_line = file_str[line_ind] - line_ind += 1 + if next_line_ind < file_obj.nLines: + next_line = file_obj.get_line(next_line_ind) + next_line_ind += 1 doc_match = DOC_COMMENT_MATCH.match(next_line) - while((doc_match is not None) and (line_ind < len(file_str))): + while((doc_match is not None) and (next_line_ind < file_obj.nLines)): doc_lines.append(next_line[doc_match.end(0):].strip()) - next_line_num += 1 - next_line = file_str[line_ind] - line_ind += 1 + next_line = file_obj.get_line(next_line_ind) + next_line_ind += 1 doc_match = DOC_COMMENT_MATCH.match(next_line) + next_line_ind -= 1 if(debug): for (i, doc_line) in enumerate(doc_lines): - print('{1} !!! Doc string({0})'.format(abs(line_number)+i, doc_line)) + print('{1} !!! Doc string({0})'.format(line_number+i, doc_line)) line_sum = 0 for doc_line in doc_lines: line_sum += len(doc_line) if line_sum > 0: - file_obj.add_doc('!! ' + '\n!! '.join(doc_lines), forward=doc_forward) + file_ast.add_doc('!! ' + '\n!! '.join(doc_lines), forward=doc_forward) continue do_skip = False for pp_reg in pp_skips: @@ -827,55 +1234,32 @@ def replace_vars(line): do_skip = True if do_skip: continue - # Get line label + # Get full line + if get_full: + _, line, post_lines = file_obj.get_code_line(line_ind, backward=False, pp_content=True) + next_line_ind += len(post_lines) + line = ''.join([line] + post_lines) + # print(line) line, line_label = strip_line_label(line) - # Merge lines with continuations - line = line.rstrip() - if fixed_format: - if line_ind < len(file_str): - next_line = file_str[line_ind] - line_ind += 1 - cont_match = CONT_REGEX.match(next_line) - while(cont_match is not None and (line_ind < len(file_str))): - line = line.rstrip() + next_line[6:].strip() - next_line_num += 1 - next_line = file_str[line_ind] - line_ind += 1 - cont_match = CONT_REGEX.match(next_line) - line_stripped = strip_strings(line, maintain_len=True) - else: - line_stripped = strip_strings(line, maintain_len=True) - iAmper = line_stripped.find('&') - iComm = line_stripped.find('!') - if iComm < 0: - iComm = iAmper + 1 - while (iAmper >= 0 and iAmper < iComm): - line_prefix = line[:iAmper] - next_line = file_str[line_ind] - line_ind += 1 - # Skip empty or comment lines - match = COMMENT_LINE_MATCH.match(next_line) - if (next_line.rstrip() == '') or (match is not None): - next_line_num += 1 - continue - cont_match = CONT_REGEX.match(next_line) - if cont_match is not None: - next_line = next_line[cont_match.end(0):] - next_line_num += 1 - line = line_prefix.rstrip() + ' ' + next_line.strip() - line_stripped = strip_strings(line, maintain_len=True) - iAmper = line_stripped.find('&') - iComm = line_stripped.find('!') - if iComm < 0: - iComm = iAmper + 1 - next_line = None + line_stripped = strip_strings(line, maintain_len=True) # Split lines with semicolons semi_colon_ind = 
line_stripped.find(';') - if semi_colon_ind >= 0: - next_line = line[semi_colon_ind+1:] - next_line_num = line_number - line = line[:semi_colon_ind] - line_stripped = line_stripped[:semi_colon_ind] + if semi_colon_ind > 0: + semi_inds = [] + tmp_line = line_stripped + while(semi_colon_ind >= 0): + semi_inds.append(semi_colon_ind) + tmp_line = tmp_line[semi_colon_ind+1:] + semi_colon_ind = tmp_line.find(';') + i0 = 0 + for semi_colon_ind in semi_inds: + semi_split.append(line[i0:i0+semi_colon_ind]) + i0 += semi_colon_ind+1 + if len(semi_split) > 0: + semi_split.append(line[i0:]) + line = semi_split[0] + semi_split = semi_split[1:] + line_stripped = strip_strings(line, maintain_len=True) # Find trailing comments comm_ind = line_stripped.find('!') if comm_ind >= 0: @@ -885,7 +1269,7 @@ def replace_vars(line): line_no_comment = line line_post_comment = None # Test for scope end - if file_obj.END_SCOPE_WORD is not None: + if file_ast.END_SCOPE_WORD is not None: match = END_WORD_REGEX.match(line_no_comment) # Handle end statement if match is not None: @@ -893,22 +1277,22 @@ def replace_vars(line): if (end_scope_word is not None) or (match.group(2) == ""): if end_scope_word is not None: end_scope_word = end_scope_word.strip().upper() - if ((end_scope_word != file_obj.END_SCOPE_WORD) - and (file_obj.current_scope.req_named_end() or (end_scope_word is not None)) - and (file_obj.current_scope is not file_obj.none_scope)): - file_obj.end_errors.append([line_number, file_obj.current_scope.sline]) - if (file_obj.current_scope.get_type() == SELECT_TYPE_ID) \ - and (file_obj.current_scope.is_type_region()): - file_obj.end_scope(line_number) - file_obj.end_scope(line_number) + if ((end_scope_word != file_ast.END_SCOPE_WORD) + and (file_ast.current_scope.req_named_end() or (end_scope_word is not None)) + and (file_ast.current_scope is not file_ast.none_scope)): + file_ast.end_errors.append([line_number, file_ast.current_scope.sline]) + if (file_ast.current_scope.get_type() == SELECT_TYPE_ID) \ + and (file_ast.current_scope.is_type_region()): + file_ast.end_scope(line_number) + file_ast.end_scope(line_number) if(debug): print('{1} !!! 
END "{2}" scope({0})'.format(line_number, line.strip(), end_scope_word)) continue # Look for old-style end of DO loops with line labels - if (file_obj.END_SCOPE_WORD == 'DO') and (line_label is not None): + if (file_ast.END_SCOPE_WORD == 'DO') and (line_label is not None): did_close = False while (len(block_id_stack) > 0) and (line_label == block_id_stack[-1]): - file_obj.end_scope(line_number) + file_ast.end_scope(line_number) block_id_stack.pop() did_close = True if(debug): @@ -919,15 +1303,15 @@ def replace_vars(line): match = IMPLICIT_REGEX.match(line_no_comment) if match is not None: err_message = None - if file_obj.current_scope is None: + if file_ast.current_scope is None: err_message = "IMPLICIT statement without enclosing scope" else: if match.group(1).lower() == 'none': - file_obj.current_scope.set_implicit(False, line_number) + file_ast.current_scope.set_implicit(False, line_number) else: - file_obj.current_scope.set_implicit(True, line_number) + file_ast.current_scope.set_implicit(True, line_number) if err_message is not None: - file_obj.parse_errors.append({ + file_ast.parse_errors.append({ "line": line_number, "schar": match.start(1), "echar": match.end(1), @@ -942,14 +1326,14 @@ def replace_vars(line): if match is not None: err_message = None try: - if file_obj.current_scope is None: + if file_ast.current_scope is None: err_message = "CONTAINS statement without enclosing scope" else: - file_obj.current_scope.mark_contains(line_number) + file_ast.current_scope.mark_contains(line_number) except ValueError: err_message = "Multiple CONTAINS statements in scope" if err_message is not None: - file_obj.parse_errors.append({ + file_ast.parse_errors.append({ "line": line_number, "schar": match.start(1), "echar": match.end(1), @@ -982,9 +1366,9 @@ def replace_vars(line): link_name = None procedure_def = False if desc_string[:3] == 'PRO': - if file_obj.current_scope.get_type() == INTERFACE_TYPE_ID: + if file_ast.current_scope.get_type() == INTERFACE_TYPE_ID: for var_name in var_names: - file_obj.add_int_member(var_name) + file_ast.add_int_member(var_name) if(debug): print('{1} !!! INTERFACE-PRO statement({0})'.format(line_number, line.strip())) continue @@ -1006,45 +1390,45 @@ def replace_vars(line): name_stripped = name_stripped.split('(')[0].strip() keywords, keyword_info = map_keywords(obj[1]) if procedure_def: - new_var = fortran_meth(file_obj, line_number, name_stripped, desc_string, + new_var = fortran_meth(file_ast, line_number, name_stripped, desc_string, keywords, keyword_info=keyword_info, link_obj=link_name) else: - new_var = fortran_var(file_obj, line_number, name_stripped, desc_string, + new_var = fortran_var(file_ast, line_number, name_stripped, desc_string, keywords, keyword_info=keyword_info, link_obj=link_name) - file_obj.add_variable(new_var) + file_ast.add_variable(new_var) if(debug): print('{1} !!! VARIABLE statement({0})'.format(line_number, line.strip())) elif obj_type == 'mod': - new_mod = fortran_module(file_obj, line_number, obj) - file_obj.add_scope(new_mod, END_MOD_WORD) + new_mod = fortran_module(file_ast, line_number, obj) + file_ast.add_scope(new_mod, END_MOD_WORD) if(debug): print('{1} !!! MODULE statement({0})'.format(line_number, line.strip())) elif obj_type == 'smod': - new_smod = fortran_submodule(file_obj, line_number, obj[0], ancestor_name=obj[1]) - file_obj.add_scope(new_smod, END_SMOD_WORD) + new_smod = fortran_submodule(file_ast, line_number, obj[0], ancestor_name=obj[1]) + file_ast.add_scope(new_smod, END_SMOD_WORD) if(debug): print('{1} !!! 
SUBMODULE statement({0})'.format(line_number, line.strip())) elif obj_type == 'prog': - new_prog = fortran_program(file_obj, line_number, obj) - file_obj.add_scope(new_prog, END_PROG_WORD) + new_prog = fortran_program(file_ast, line_number, obj) + file_ast.add_scope(new_prog, END_PROG_WORD) if(debug): print('{1} !!! PROGRAM statement({0})'.format(line_number, line.strip())) elif obj_type == 'sub': keywords, _ = map_keywords(obj[3]) - new_sub = fortran_subroutine(file_obj, line_number, obj[0], args=obj[1], mod_sub=obj[2], + new_sub = fortran_subroutine(file_ast, line_number, obj[0], args=obj[1], mod_sub=obj[2], keywords=keywords) - file_obj.add_scope(new_sub, END_SUB_WORD) + file_ast.add_scope(new_sub, END_SUB_WORD) if(debug): print('{1} !!! SUBROUTINE statement({0})'.format(line_number, line.strip())) elif obj_type == 'fun': keywords, _ = map_keywords(obj[4]) - new_fun = fortran_function(file_obj, line_number, obj[0], args=obj[1], + new_fun = fortran_function(file_ast, line_number, obj[0], args=obj[1], mod_fun=obj[3], keywords=keywords, return_type=obj[2][0], result_var=obj[2][1]) - file_obj.add_scope(new_fun, END_FUN_WORD) + file_ast.add_scope(new_fun, END_FUN_WORD) if obj[2][0] is not None: - new_obj = fortran_var(file_obj, line_number, obj[0], obj[2][0][0], obj[2][0][1]) - file_obj.add_variable(new_obj) + new_obj = fortran_var(file_ast, line_number, obj[0], obj[2][0][0], obj[2][0][1]) + file_ast.add_variable(new_obj) if(debug): print('{1} !!! FUNCTION statement({0})'.format(line_number, line.strip())) elif obj_type == 'block': @@ -1052,8 +1436,8 @@ def replace_vars(line): if name is None: block_counter += 1 name = '#BLOCK{0}'.format(block_counter) - new_block = fortran_block(file_obj, line_number, name) - file_obj.add_scope(new_block, END_BLOCK_WORD, req_container=True) + new_block = fortran_block(file_ast, line_number, name) + file_ast.add_scope(new_block, END_BLOCK_WORD, req_container=True) if(debug): print('{1} !!! BLOCK statement({0})'.format(line_number, line.strip())) elif obj_type == 'do': @@ -1061,8 +1445,8 @@ def replace_vars(line): name = '#DO{0}'.format(do_counter) if obj[0] != '': block_id_stack.append(obj[0]) - new_do = fortran_do(file_obj, line_number, name) - file_obj.add_scope(new_do, END_DO_WORD, req_container=True) + new_do = fortran_do(file_ast, line_number, name) + file_ast.add_scope(new_do, END_DO_WORD, req_container=True) if(debug): print('{1} !!! DO statement({0})'.format(line_number, line.strip())) elif obj_type == 'where': @@ -1070,49 +1454,49 @@ def replace_vars(line): if not obj: do_counter += 1 name = '#WHERE{0}'.format(do_counter) - new_do = fortran_where(file_obj, line_number, name) - file_obj.add_scope(new_do, END_WHERE_WORD, req_container=True) + new_do = fortran_where(file_ast, line_number, name) + file_ast.add_scope(new_do, END_WHERE_WORD, req_container=True) if(debug): print('{1} !!! WHERE statement({0})'.format(line_number, line.strip())) elif obj_type == 'assoc': block_counter += 1 name = '#ASSOC{0}'.format(block_counter) - new_assoc = fortran_associate(file_obj, line_number, name) - file_obj.add_scope(new_assoc, END_ASSOCIATE_WORD, req_container=True) + new_assoc = fortran_associate(file_ast, line_number, name) + file_ast.add_scope(new_assoc, END_ASSOCIATE_WORD, req_container=True) if(debug): print('{1} !!! 
ASSOCIATE statement({0})'.format(line_number, line.strip())) elif obj_type == 'if': if_counter += 1 name = '#IF{0}'.format(if_counter) - new_if = fortran_if(file_obj, line_number, name) - file_obj.add_scope(new_if, END_IF_WORD, req_container=True) + new_if = fortran_if(file_ast, line_number, name) + file_ast.add_scope(new_if, END_IF_WORD, req_container=True) if(debug): print('{1} !!! IF statement({0})'.format(line_number, line.strip())) elif obj_type == 'select': select_counter += 1 name = '#SELECT{0}'.format(select_counter) - new_select = fortran_select(file_obj, line_number, name, obj) - file_obj.add_scope(new_select, END_SELECT_WORD, req_container=True) + new_select = fortran_select(file_ast, line_number, name, obj) + file_ast.add_scope(new_select, END_SELECT_WORD, req_container=True) new_var = new_select.create_binding_variable( - file_obj, line_number, '{0}({1})'.format(obj[2], obj[1]), obj[0] + file_ast, line_number, '{0}({1})'.format(obj[2], obj[1]), obj[0] ) if new_var is not None: - file_obj.add_variable(new_var) + file_ast.add_variable(new_var) if(debug): print('{1} !!! SELECT statement({0})'.format(line_number, line.strip())) elif obj_type == 'typ': keywords, _ = map_keywords(obj[2]) - new_type = fortran_type(file_obj, line_number, obj[0], keywords) + new_type = fortran_type(file_ast, line_number, obj[0], keywords) if obj[1] is not None: new_type.set_inherit(obj[1]) - file_obj.add_scope(new_type, END_TYPED_WORD, req_container=True) + file_ast.add_scope(new_type, END_TYPED_WORD, req_container=True) if(debug): print('{1} !!! TYPE statement({0})'.format(line_number, line.strip())) elif obj_type == 'enum': block_counter += 1 name = '#ENUM{0}'.format(block_counter) - new_enum = fortran_enum(file_obj, line_number, name) - file_obj.add_scope(new_enum, END_ENUMD_WORD, req_container=True) + new_enum = fortran_enum(file_ast, line_number, name) + file_ast.add_scope(new_enum, END_ENUMD_WORD, req_container=True) if(debug): print('{1} !!! ENUM statement({0})'.format(line_number, line.strip())) elif obj_type == 'int': @@ -1120,61 +1504,61 @@ def replace_vars(line): if name is None: int_counter += 1 name = '#GEN_INT{0}'.format(int_counter) - new_int = fortran_int(file_obj, line_number, name, abstract=obj[1]) - file_obj.add_scope(new_int, END_INT_WORD, req_container=True) + new_int = fortran_int(file_ast, line_number, name, abstract=obj[1]) + file_ast.add_scope(new_int, END_INT_WORD, req_container=True) if(debug): print('{1} !!! INTERFACE statement({0})'.format(line_number, line.strip())) elif obj_type == 'gen': - new_int = fortran_int(file_obj, line_number, obj[0], abstract=False) - file_obj.add_scope(new_int, END_INT_WORD, req_container=True) + new_int = fortran_int(file_ast, line_number, obj[0], abstract=False) + file_ast.add_scope(new_int, END_INT_WORD, req_container=True) for pro_link in obj[1]: - file_obj.add_int_member(pro_link) - file_obj.end_scope(line_number) + file_ast.add_int_member(pro_link) + file_ast.end_scope(line_number) if(debug): print('{1} !!! GENERIC statement({0})'.format(line_number, line.strip())) elif obj_type == 'int_pro': - if (file_obj.current_scope is None) or (file_obj.current_scope.get_type() != INTERFACE_TYPE_ID): + if (file_ast.current_scope is None) or (file_ast.current_scope.get_type() != INTERFACE_TYPE_ID): continue for name in obj: - file_obj.add_int_member(name) + file_ast.add_int_member(name) if(debug): print('{1} !!! 
INTERFACE-PRO statement({0})'.format(line_number, line.strip())) elif obj_type == 'use': - file_obj.add_use(obj[0], line_number, obj[1]) + file_ast.add_use(obj[0], line_number, obj[1]) if(debug): print('{1} !!! USE statement({0})'.format(line_number, line.strip())) elif obj_type == 'import': - file_obj.add_use('#IMPORT', line_number, obj[0]) + file_ast.add_use('#IMPORT', line_number, obj[0]) if(debug): print('{1} !!! IMPORT statement({0})'.format(line_number, line.strip())) elif obj_type == 'inc': - file_obj.add_include(obj[0], line_number) + file_ast.add_include(obj[0], line_number) if(debug): print('{1} !!! INCLUDE statement({0})'.format(line_number, line.strip())) elif obj_type == 'vis': if (len(obj[1]) == 0) and (obj[0] == 1): - file_obj.current_scope.set_default_vis(-1) + file_ast.current_scope.set_default_vis(-1) else: if obj[0] == 1: for word in obj[1]: - file_obj.add_private(word) + file_ast.add_private(word) else: for word in obj[1]: - file_obj.add_public(word) + file_ast.add_public(word) if(debug): print('{1} !!! Visiblity statement({0})'.format(line_number, line.strip())) - file_obj.close_file(line_number) + file_ast.close_file(line_number) if debug: - if len(file_obj.end_errors) > 0: + if len(file_ast.end_errors) > 0: print("\n=== Scope Errors ===\n") - for error in file_obj.end_errors: + for error in file_ast.end_errors: if error[0] >= 0: message = 'Unexpected end of scope at line {0}'.format(error[0]) else: message = 'Unexpected end statement: No open scopes' print('{0}: {1}'.format(error[1], message)) - if len(file_obj.parse_errors) > 0: + if len(file_ast.parse_errors) > 0: print("\n=== Parsing Errors ===\n") - for error in file_obj.parse_errors: + for error in file_ast.parse_errors: print('{0}: {1}'.format(error["line"], error["mess"])) - return file_obj + return file_ast From 005ecfd12c0d2aadc76812aeeb406d4083854faa Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 21 Mar 2019 10:39:13 -0400 Subject: [PATCH 05/44] Add command line options: - Set the number of threads used during initialization - Provide a notification when workspace initialization is complete --- fortls/__init__.py | 10 ++++++++++ fortls/langserver.py | 6 +++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/fortls/__init__.py b/fortls/__init__.py index 4dbbc0e..19891e0 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -23,6 +23,14 @@ def main(): '--version', action="store_true", help="Print server version number and exit" ) + parser.add_argument( + '--nthreads', type=int, default=4, + help="Number of threads to use during workspace initialization (default: 4)" + ) + parser.add_argument( + '--notify_init', action="store_true", + help="Send notification message when workspace initialization is complete" + ) parser.add_argument( '--symbol_skip_mem', action="store_true", help="Do not include type members in document symbol results" @@ -137,6 +145,8 @@ def main(): or (args.debug_workspace_symbols is not None)) # settings = { + "nthreads": args.nthreads, + "notify_init": args.notify_init, "symbol_include_mem": (not args.symbol_skip_mem), "sync_type": 2 if args.incremental_sync else 1, "autocomplete_no_prefix": args.autocomplete_no_prefix, diff --git a/fortls/langserver.py b/fortls/langserver.py index 9cfa5aa..a6a4838 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -73,6 +73,8 @@ def __init__(self, conn, debug_log=False, settings={}): # Intrinsic (re-loaded during initialize) self.statements, self.keywords, self.intrinsic_funs, self.intrinsic_mods = load_intrinsics() 
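# --- Editorial sketch (illustration only; not part of this patch) ----------
# The "--nthreads" and "--notify_init" flags defined in fortls/__init__.py
# above arrive here through the settings dict read just below and are later
# used by workspace_init() and serve_initialize(). The trimmed argparse setup
# in this sketch is an assumption made for illustration; it is not the
# project's real main().
import argparse

_sketch_parser = argparse.ArgumentParser()
_sketch_parser.add_argument('--nthreads', type=int, default=4)
_sketch_parser.add_argument('--notify_init', action="store_true")
_sketch_args = _sketch_parser.parse_args(['--nthreads', '8', '--notify_init'])

_sketch_settings = {
    "nthreads": _sketch_args.nthreads,        # worker processes used during workspace initialization
    "notify_init": _sketch_args.notify_init,  # post a message once initialization completes
}
assert _sketch_settings == {"nthreads": 8, "notify_init": True}
# --- End editorial sketch ---------------------------------------------------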
# Get launch settings + self.nthreads = settings.get("nthreads", 4) + self.notify_init = settings.get("notify_init", False) self.symbol_include_mem = settings.get("symbol_include_mem", True) self.sync_type = settings.get("sync_type", 1) self.autocomplete_no_prefix = settings.get("autocomplete_no_prefix", False) @@ -249,6 +251,8 @@ def serve_initialize(self, request): } if self.enable_code_actions: server_capabilities["codeActionProvider"] = True + if self.notify_init: + self.post_messages.append([3, "FORTLS initialization complete"]) return {"capabilities": server_capabilities} # "workspaceSymbolProvider": True, # "streaming": False, @@ -1087,7 +1091,7 @@ def workspace_init(self): file_list.append(filepath) # Process files from multiprocessing import Pool - pool = Pool(processes=4) + pool = Pool(processes=self.nthreads) results = {} for filepath in file_list: results[filepath] = pool.apply_async(init_file, args=(filepath, self.pp_defs)) From 46f2a614bca413e3d87d6a51e8484f18ac2672c0 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 21 Mar 2019 11:17:49 -0400 Subject: [PATCH 06/44] Improve parsing performance --- fortls/parse_fortran.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index e75b7ed..4c6f733 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -75,6 +75,7 @@ SQ_STRING_REGEX = re.compile(r'\'[^\']*\'', re.I) DQ_STRING_REGEX = re.compile(r'\"[^\"]*\"', re.I) LINE_LABEL_REGEX = re.compile(r'[ ]*([0-9]+)[ ]+', re.I) +NON_DEF_REGEX = re.compile(r'[ ]*(CALL[ ]+[a-z_]|[a-z_][a-z0-9_%]*[ ]*=)', re.I) # Fixed format matching rules FIXED_COMMENT_LINE_MATCH = re.compile(r'(!|c|d|\*)', re.I) FIXED_CONT_REGEX = re.compile(r'( [\S])') @@ -759,12 +760,6 @@ def read_vis_stmnt(line): return 'vis', [vis_type, mod_words] -def_tests = [read_var_def, read_sub_def, read_fun_def, read_block_def, - read_select_def, read_type_def, read_enum_def, read_use_stmt, - read_int_def, read_generic_def, read_mod_def, read_prog_def, - read_submod_def, read_inc_stmt, read_vis_stmnt] - - class fortran_file: def __init__(self, path=None): self.path = path @@ -1139,6 +1134,14 @@ def replace_vars(line): return pp_skips, pp_defines +def_tests = [ + read_var_def, read_sub_def, read_fun_def, read_block_def, + read_select_def, read_type_def, read_enum_def, read_use_stmt, + read_int_def, read_generic_def, read_mod_def, read_prog_def, + read_submod_def, read_inc_stmt, read_vis_stmnt +] + + def process_file(file_obj, close_open_scopes, debug=False, pp_defs=None): """Build file AST by parsing file""" # @@ -1207,12 +1210,12 @@ def process_file(file_obj, close_open_scopes, debug=False, pp_defs=None): else: doc_forward = False if next_line_ind < file_obj.nLines: - next_line = file_obj.get_line(next_line_ind) + next_line = file_obj.get_line(next_line_ind, pp_content=True) next_line_ind += 1 doc_match = DOC_COMMENT_MATCH.match(next_line) while((doc_match is not None) and (next_line_ind < file_obj.nLines)): doc_lines.append(next_line[doc_match.end(0):].strip()) - next_line = file_obj.get_line(next_line_ind) + next_line = file_obj.get_line(next_line_ind, pp_content=True) next_line_ind += 1 doc_match = DOC_COMMENT_MATCH.match(next_line) next_line_ind -= 1 @@ -1299,6 +1302,10 @@ def process_file(file_obj, close_open_scopes, debug=False, pp_defs=None): print('{1} !!! 
END "DO" scope({0})'.format(line_number, line.strip())) if did_close: continue + # Skip if known generic code line + match = NON_DEF_REGEX.match(line_no_comment) + if match is not None: + continue # Mark implicit statement match = IMPLICIT_REGEX.match(line_no_comment) if match is not None: From e67c3b16bc8d208489b8370fb1c0eb5a4462dd05 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 21 Mar 2019 12:05:34 -0400 Subject: [PATCH 07/44] Fix diagnostics for interface statements with USE instead of IMPORT --- fortls/objects.py | 53 +++++++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index ea3fc68..6e38138 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -125,7 +125,7 @@ def get_use_tree(scope, use_dict, obj_tree, only_list=[]): return use_dict -def find_in_scope(scope, var_name, obj_tree): +def find_in_scope(scope, var_name, obj_tree, interface=False): def check_scope(local_scope, var_name_lower, filter_public=False): for child in local_scope.get_children(): if child.name.startswith("#GEN_INT"): @@ -159,6 +159,16 @@ def check_scope(local_scope, var_name_lower, filter_public=False): tmp_var = check_scope(use_scope, var_name_lower, filter_public=True) if tmp_var is not None: return tmp_var + # Only search local and imported names for interfaces + if interface: + in_import = False + for use_line in scope.use: + if use_line[0].startswith('#import'): + if use_line[1].count(var_name_lower) > 0: + in_import = True + break + if not in_import: + return None # Check parent scopes if scope.parent is not None: tmp_var = find_in_scope(scope.parent, var_name, obj_tree) @@ -385,7 +395,7 @@ def req_named_end(self): def check_valid_parent(self): return True - def check_definition(self, obj_tree, known_types={}, import_objs=None): + def check_definition(self, obj_tree, known_types={}, interface=False): return None, known_types @@ -484,19 +494,16 @@ def check_definitions(self, obj_tree): else: FQSN_dict[child.FQSN] = child.sline - 1 # Get list of imported objects for interfaces - import_objs = None + is_interface = False if (self.parent is not None) and (self.parent.get_type() == INTERFACE_TYPE_ID): - import_objs = [] - for use_line in self.use: - if use_line[0].startswith('#import'): - import_objs += use_line[1] + is_interface = True errors = [] known_types = {} for child in self.children: line_number = child.sline - 1 # Check for type definition in scope def_error, known_types = child.check_definition( - obj_tree, known_types=known_types, import_objs=import_objs + obj_tree, known_types=known_types, interface=is_interface ) if def_error is not None: errors.append(def_error) @@ -1332,7 +1339,7 @@ def is_optional(self): def is_callable(self): return self.callable - def check_definition(self, obj_tree, known_types={}, import_objs=None): + def check_definition(self, obj_tree, known_types={}, interface=False): # Check for type definition in scope type_match = DEF_KIND_REGEX.match(self.desc) if type_match is not None: @@ -1341,7 +1348,7 @@ def check_definition(self, obj_tree, known_types={}, import_objs=None): return None, known_types desc_obj_name = type_match.group(2).strip().lower() if desc_obj_name not in known_types: - type_def = find_in_scope(self, desc_obj_name, obj_tree) + type_def = find_in_scope(self.parent, desc_obj_name, obj_tree, interface=interface) if type_def is None: type_defs = find_in_workspace(obj_tree, desc_obj_name, filter_public=True, exact_match=True) known_types[desc_obj_name] = None 
@@ -1358,18 +1365,18 @@ def check_definition(self, obj_tree, known_types={}, import_objs=None): type_info = known_types[desc_obj_name] if type_info is not None: if type_info[0] == 1: - type_def = type_info[1] - out_diag = fortran_diagnostic( - self.sline-1, message='Object "{0}" not found in scope'.format(desc_obj_name), - severity=1, find_word=desc_obj_name - ) - out_diag.add_related(path=type_def.file.path, line=type_def.sline-1, message='Possible object') - return out_diag, known_types - elif (import_objs is not None) and (desc_obj_name not in import_objs): - out_diag = fortran_diagnostic( - self.sline-1, message='Object "{0}" not imported in interface'.format(desc_obj_name), - severity=1, find_word=desc_obj_name - ) + if interface: + out_diag = fortran_diagnostic( + self.sline-1, message='Object "{0}" not found in scope'.format(desc_obj_name), + severity=1, find_word=desc_obj_name + ) + type_def = type_info[1] + out_diag.add_related(path=type_def.file.path, line=type_def.sline-1, message='Possible object') + else: + out_diag = fortran_diagnostic( + self.sline-1, message='Object "{0}" not imported in interface'.format(desc_obj_name), + severity=1, find_word=desc_obj_name + ) return out_diag, known_types return None, known_types @@ -1479,7 +1486,7 @@ def resolve_link(self, obj_tree): def is_callable(self): return True - def check_definition(self, obj_tree, known_types={}, import_objs=None): + def check_definition(self, obj_tree, known_types={}, interface=False): return None, known_types From d1017d8d2209c9b92e87d34e17cde76fdb89e174 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 15 Mar 2019 16:27:22 -0400 Subject: [PATCH 08/44] Updates to README with new features --- README.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.rst b/README.rst index 166d0f2..7a60ac9 100644 --- a/README.rst +++ b/README.rst @@ -45,6 +45,10 @@ Language Server Features - Use of unimported variables/objects in interface blocks - Statement placement errors ("CONTAINS", "IMPLICIT", "IMPORT") +- Code actions (``textDocument/codeAction``) [Experimental, must be enabled in settings] + + - Generate type-bound procedures and implementation templates for deferred procedures + **Notes/Limitations:** - Signature help is not available for overloaded subroutines/functions @@ -90,6 +94,8 @@ Language server settings The following global settings can be used when launching the language server. +* ``--nthreads`` Number of threads to use during workspace initialization (default: 4) +* ``--notify_init`` Send notification message when workspace initialization is complete * ``--symbol_skip_mem`` Do not include type members in document symbol results * ``--incremental_sync`` Use incremental document synchronization * ``--autocomplete_no_prefix`` Do not filter autocomplete results by variable prefix @@ -97,6 +103,7 @@ The following global settings can be used when launching the language server. 
* ``--use_signature_help`` Use signature help instead of snippets for subroutines/functions * ``--variable_hover`` Show hover information for variables (default: subroutines/functions only) * ``--preserve_keyword_order`` Display variable keywords information in original order (default: sort to consistent ordering) +* ``--enable_code_actions`` Enable experimental code actions (default: false) * ``--debug_log`` Write debug information to ``root_dir/fortls_debug.log`` (requires a specified ``root_dir`` during initialization) **Debug settings:** @@ -118,6 +125,7 @@ The following settings can be used to perform `standalone debug tests Date: Thu, 21 Mar 2019 19:59:42 -0400 Subject: [PATCH 09/44] Detect CONTAINS statement placement errors --- fortls/objects.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index 6e38138..8f271a5 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -493,6 +493,13 @@ def check_definitions(self, obj_tree): FQSN_dict[child.FQSN] = child.sline - 1 else: FQSN_dict[child.FQSN] = child.sline - 1 + # + contains_line = -1 + if self.get_type() in (MODULE_TYPE_ID, SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID): + if self.contains_start is None: + contains_line = self.eline + else: + contains_line = self.contains_start # Get list of imported objects for interfaces is_interface = False if (self.parent is not None) and (self.parent.get_type() == INTERFACE_TYPE_ID): @@ -507,6 +514,13 @@ def check_definitions(self, obj_tree): ) if def_error is not None: errors.append(def_error) + # Detect contains errors + if (contains_line >= 0) and (child.get_type() in (SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID)): + new_diag = fortran_diagnostic( + line_number, message='Subroutine/Function definition before CONTAINS statement', + severity=1 + ) + errors.append(new_diag) # Skip masking/double checks for interface members if (self.parent is not None) and (self.parent.get_type() == INTERFACE_TYPE_ID): continue @@ -568,10 +582,14 @@ def add_subroutine(self, interface_string, no_contains=False): edits = [] line_number = self.eline - 1 if (self.contains_start is None) and (not no_contains): + first_sub_line = line_number + for child in self.children: + if child.get_type() in (SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID): + first_sub_line = min(first_sub_line, child.sline - 1) edits.append({ "range": { - "start": {"line": line_number, "character": 0}, - "end": {"line": line_number, "character": 0} + "start": {"line": first_sub_line, "character": 0}, + "end": {"line": first_sub_line, "character": 0} }, "newText": "CONTAINS\n" }) From 2953e466a233e7057d93ec16301ec6b90e26688a Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 21 Mar 2019 20:11:07 -0400 Subject: [PATCH 10/44] Add some documentation --- fortls/parse_fortran.py | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 4c6f733..ba84932 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -101,7 +101,12 @@ def get_var_stack(line): - """Get user-defined type field sequence""" + """Get user-defined type field sequence terminating the given line + + Examples: + "myvar%foo%bar" -> ["myvar", "foo", "bar"] + "CALL self%method(this%foo" -> ["this", "foo"] + """ if len(line) == 0: return None final_var, sections = get_paren_level(line) @@ -124,7 +129,7 @@ def get_var_stack(line): def expand_name(line, char_poss): - """Get full word containing current position""" + 
"""Get full word containing given cursor position""" for word_match in WORD_REGEX.finditer(line): if word_match.start(0) <= char_poss and word_match.end(0) >= char_poss: return word_match.group(0) @@ -199,7 +204,7 @@ def get_line_context(line): return 'mod_mems', test_match[1][0] else: return 'mod_only', None - # Test if scope declaration or end statement + # Test if scope declaration or end statement (no completion provided) if SCOPE_DEF_REGEX.match(line) or END_REGEX.match(line): return 'skip', None # Test if import statement @@ -225,7 +230,7 @@ def get_line_context(line): # Only thing on line? if INT_STMNT_REGEX.match(line) is not None: return 'first', None - # Default context + # Default or skip context if type_def: return 'skip', None else: @@ -278,7 +283,12 @@ def repl_dq(m): def separate_def_list(test_str): - """Separate definition lists, skipping parenthesis and bracket groups""" + """Separate definition lists, skipping parenthesis and bracket groups + + Examples: + "var1, var2, var3" -> ["var1", "var2", "var3"] + "var, init_var(3) = [1,2,3], array(3,3)" -> ["var", "init_var", "array"] + """ stripped_str = strip_strings(test_str) paren_count = 0 def_list = [] @@ -304,7 +314,8 @@ def separate_def_list(test_str): def find_paren_match(test_str): - """Find matching closing parenthesis by searching forward""" + """Find matching closing parenthesis by searching forward, + returns -1 if no match is found""" paren_count = 1 ind = -1 for (i, char) in enumerate(test_str): @@ -320,6 +331,10 @@ def find_paren_match(test_str): def get_paren_level(line): """Get sub-string corresponding to a single parenthesis level, via backward search up through the line. + + Examples: + "CALL sub1(arg1,arg2" -> ("arg1,arg2", [[10, 19]]) + "CALL sub1(arg1(i),arg2" -> ("arg1,arg2", [[10, 14], [17, 22]]) """ if line == '': return '', [[0, 0]] @@ -357,8 +372,8 @@ def get_paren_level(line): return out_string, sections -def parse_keywords(test_str): - """Parse keywords""" +def parse_var_keywords(test_str): + """Parse Fortran variable declaration keywords""" keyword_match = KEYWORD_LIST_REGEX.match(test_str) keywords = [] while (keyword_match is not None): @@ -413,7 +428,7 @@ def read_var_def(line, type_word=None, fun_only=False): if not trailing_line[0] in (' ', ',', ':'): return None # - keywords, trailing_line = parse_keywords(trailing_line) + keywords, trailing_line = parse_var_keywords(trailing_line) # Check if function fun_def = read_fun_def(trailing_line, [type_word, keywords]) if (fun_def is not None) or fun_only: From 28a0f5bb67419bed170bb97883219ec7f126067c Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 21 Mar 2019 20:25:49 -0400 Subject: [PATCH 11/44] Fix bug in CONTAINS statement placement errors introduced in 1afa933 --- fortls/objects.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fortls/objects.py b/fortls/objects.py index 8f271a5..427b497 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -515,7 +515,7 @@ def check_definitions(self, obj_tree): if def_error is not None: errors.append(def_error) # Detect contains errors - if (contains_line >= 0) and (child.get_type() in (SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID)): + if (contains_line >= child.sline) and (child.get_type() in (SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID)): new_diag = fortran_diagnostic( line_number, message='Subroutine/Function definition before CONTAINS statement', severity=1 From f00849bb50952dc4cdeaad68df51e3b58487b08b Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 22 Mar 2019 10:43:56 -0400 Subject: 
[PATCH 12/44] Fix bug with TYPE definitions with an immediately following colon, ref #100 --- fortls/parse_fortran.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index ba84932..7c1d331 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -50,7 +50,7 @@ END_WORD_REGEX = re.compile(r'[ ]*END[ ]*(DO|WHERE|IF|BLOCK|ASSOCIATE|SELECT' r'|TYPE|ENUM|MODULE|SUBMODULE|PROGRAM|INTERFACE' r'|SUBROUTINE|FUNCTION)?([ ]+|$)', re.I) -TYPE_DEF_REGEX = re.compile(r'[ ]*(TYPE)[, ]+', re.I) +TYPE_DEF_REGEX = re.compile(r'[ ]*(TYPE)[, :]+', re.I) EXTENDS_REGEX = re.compile(r'EXTENDS[ ]*\(([a-z0-9_]*)\)', re.I) GENERIC_PRO_REGEX = re.compile(r'[ ]*(GENERIC)[ ]*::[ ]*[a-z]', re.I) GEN_ASSIGN_REGEX = re.compile(r'(ASSIGNMENT|OPERATOR)\(', re.I) From 7dd89eb7bc6bc72b7153d96eff343a3e8229055e Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 22 Mar 2019 10:45:01 -0400 Subject: [PATCH 13/44] Fix "RecursionError" exception with circular user-defined type references, fixes #100 --- fortls/objects.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fortls/objects.py b/fortls/objects.py index 427b497..1cf22b1 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -976,6 +976,7 @@ def __init__(self, file_obj, line_number, name, keywords): self.in_children = [] self.inherit = None self.inherit_var = None + self.inherit_tmp = None if self.keywords.count(KEYWORD_ID_DICT['public']) > 0: self.vis = 1 if self.keywords.count(KEYWORD_ID_DICT['private']) > 0: @@ -998,12 +999,18 @@ def resolve_inherit(self, obj_tree): # self.inherit_var = find_in_scope(self.parent, self.inherit, obj_tree) if self.inherit_var is not None: + # Disable "resolve_inherit" to allow circular type references + self.inherit_tmp = self.inherit + self.inherit = None self.inherit_var.resolve_inherit(obj_tree) # Get current fields child_names = [] for child in self.children: child_names.append(child.name.lower()) child.resolve_inherit(obj_tree) + # Re-enable "resolve_inherit" to allow circular type references + self.inherit = self.inherit_tmp + self.inherit_tmp = None # Import for parent objects self.in_children = [] for child in self.inherit_var.get_children(): From 65754aa192729f4cff318bd7463e07535cc3a52f Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 22 Mar 2019 11:04:27 -0400 Subject: [PATCH 14/44] Detect and report visibility statements with no enclosing scope --- fortls/parse_fortran.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 7c1d331..83ef231 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -1558,15 +1558,24 @@ def process_file(file_obj, close_open_scopes, debug=False, pp_defs=None): if(debug): print('{1} !!! 
INCLUDE statement({0})'.format(line_number, line.strip())) elif obj_type == 'vis': - if (len(obj[1]) == 0) and (obj[0] == 1): - file_ast.current_scope.set_default_vis(-1) + if file_ast.current_scope is None: + file_ast.parse_errors.append({ + "line": line_number, + "schar": 0, + "echar": 0, + "mess": "Visibility statement without enclosing scope", + "sev": 1 + }) else: - if obj[0] == 1: - for word in obj[1]: - file_ast.add_private(word) + if (len(obj[1]) == 0) and (obj[0] == 1): + file_ast.current_scope.set_default_vis(-1) else: - for word in obj[1]: - file_ast.add_public(word) + if obj[0] == 1: + for word in obj[1]: + file_ast.add_private(word) + else: + for word in obj[1]: + file_ast.add_public(word) if(debug): print('{1} !!! Visiblity statement({0})'.format(line_number, line.strip())) file_ast.close_file(line_number) From a45965a38c92a74e4277f6226f9895b12c4b1fd4 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 22 Mar 2019 16:02:47 -0400 Subject: [PATCH 15/44] Add position of object in line to "textDocument/definition" and "textDocument/implementation" results - Remove naming ambiguity between "file" objects and "ast" objects --- fortls/langserver.py | 37 ++++++---- fortls/objects.py | 159 +++++++++++++++------------------------- fortls/parse_fortran.py | 33 +++++++++ 3 files changed, 114 insertions(+), 115 deletions(-) diff --git a/fortls/langserver.py b/fortls/langserver.py index a6a4838..9922265 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -283,7 +283,7 @@ def map_types(type): "name": candidate.name, "kind": map_types(candidate.get_type()), "location": { - "uri": path_to_uri(candidate.file.path), + "uri": path_to_uri(candidate.file_ast.path), "range": { "start": {"line": candidate.sline-1, "character": 0}, "end": {"line": candidate.eline-1, "character": 0} @@ -362,7 +362,7 @@ def map_types(type): "uri": uri, "range": { "start": {"line": child.sline-1, "character": 0}, - "end": {"line": child.sline-1, "character": 1} + "end": {"line": child.sline-1, "character": 0} } } tmp_out["containerName"] = scope.name @@ -756,7 +756,7 @@ def serve_references(self, request): if def_obj.parent.get_type() == CLASS_TYPE_ID: type_mem = True else: - restrict_file = self.workspace.get(def_obj.file.path) + restrict_file = def_obj.file_ast.file if restrict_file is None: return [] # Search through all files @@ -791,7 +791,8 @@ def serve_references(self, request): ref_match = True override_cache.append(var_def.FQSN) break - if (var_def.sline-1 == i) and (var_def.file.path == filename) and (line.count("=>") == 0): + if (var_def.sline-1 == i) and (var_def.file_ast.path == filename) \ + and (line.count("=>") == 0): try: if var_def.link_obj is def_obj: ref_match = True @@ -822,13 +823,17 @@ def serve_definition(self, request): if var_obj is None: return None # Construct link reference - if var_obj.file.path is not None: - sline = var_obj.sline-1 + if var_obj.file_ast.file is not None: + var_file = var_obj.file_ast.file + sline, schar, echar = \ + var_file.find_word_in_code_line(var_obj.sline-1, var_obj.name) + if schar < 0: + schar = echar = 0 return { - "uri": path_to_uri(var_obj.file.path), + "uri": path_to_uri(var_file.path), "range": { - "start": {"line": sline, "character": 0}, - "end": {"line": sline, "character": 1} + "start": {"line": sline, "character": schar}, + "end": {"line": sline, "character": echar} } } return None @@ -891,13 +896,17 @@ def serve_implementation(self, request): # Construct implementation reference if var_obj.parent.get_type() == CLASS_TYPE_ID: impl_obj = 
var_obj.link_obj - if (impl_obj is not None) and (impl_obj.file.path is not None): - sline = impl_obj.sline-1 + if (impl_obj is not None) and (impl_obj.file_ast.file is not None): + impl_file = impl_obj.file_ast.file + sline, schar, echar = \ + impl_file.find_word_in_code_line(impl_obj.sline-1, impl_obj.name) + if schar < 0: + schar = echar = 0 return { - "uri": path_to_uri(impl_obj.file.path), + "uri": path_to_uri(impl_file.path), "range": { - "start": {"line": sline, "character": 0}, - "end": {"line": sline, "character": 1} + "start": {"line": sline, "character": schar}, + "end": {"line": sline, "character": echar} } } return None diff --git a/fortls/objects.py b/fortls/objects.py index 1cf22b1..6b0ab74 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -2,7 +2,6 @@ import re import os from fortls.jsonrpc import path_to_uri -WORD_REGEX = re.compile(r'[a-z_][a-z0-9_]*', re.I) CLASS_VAR_REGEX = re.compile(r'(TYPE|CLASS)[ ]*\(', re.I) DEF_KIND_REGEX = re.compile(r'([a-z]*)[ ]*\((?:KIND|LEN)?[ =]*([a-z_][a-z0-9_]*)', re.I) # Keyword identifiers @@ -209,44 +208,6 @@ def add_children(mod_obj, query): return matching_symbols -def find_word_in_line(line, word): - i0 = -1 - for poss_name in WORD_REGEX.finditer(line): - if poss_name.group() == word: - i0 = poss_name.start() - break - return i0, i0 + len(word) - - -def build_diagnostic(sline, message, severity=1, eline=None, file_contents=None, find_word=None, - related_path=None, related_line=None, related_message=""): - if eline is None: - eline = sline - i0 = i1 = 0 - if (file_contents is not None) and (find_word is not None): - i0, i1 = find_word_in_line(file_contents[sline].lower(), find_word.lower()) - diag = { - "range": { - "start": {"line": sline, "character": i0}, - "end": {"line": eline, "character": i1} - }, - "message": message, - "severity": severity - } - if (related_path is not None) and (related_line is not None): - diag["relatedInformation"] = [{ - "location": { - "uri": path_to_uri(related_path), - "range": { - "start": {"line": related_line, "character": 0}, - "end": {"line": related_line, "character": 0} - } - }, - "message": related_message - }] - return diag - - class fortran_diagnostic: def __init__(self, sline, message, severity=1, find_word=None): self.sline = sline @@ -267,16 +228,11 @@ def add_related(self, path, line, message): def build(self, file_obj): schar = echar = 0 if self.find_word is not None: - _, curr_line, forward_lines = file_obj.get_code_line(self.sline, backward=False) - schar, echar = find_word_in_line(curr_line.lower(), self.find_word.lower()) - if schar < 0: - for (i, line) in enumerate(forward_lines): - schar, echar = find_word_in_line(line.lower(), self.find_word.lower()) - if schar >= 0: - self.sline += i+1 - break - if schar < 0: - schar = echar = 0 + self.sline, found_schar, found_echar = \ + file_obj.find_word_in_code_line(self.sline, self.find_word) + if found_schar >= 0: + schar = found_schar + echar = found_echar diag = { "range": { "start": {"line": self.sline, "character": schar}, @@ -400,11 +356,11 @@ def check_definition(self, obj_tree, known_types={}, interface=False): class fortran_scope(fortran_obj): - def __init__(self, file_obj, line_number, name): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name): + self.base_setup(file_ast, line_number, name) - def base_setup(self, file_obj, sline, name, keywords=[]): - self.file = file_obj + def base_setup(self, file_ast, sline, name, keywords=[]): + self.file_ast = file_ast self.sline = sline 
self.eline = sline self.name = name @@ -420,8 +376,8 @@ def base_setup(self, file_obj, sline, name, keywords=[]): self.doc_str = None self.implicit_vars = None self.implicit_line = None - if file_obj.enc_scope_name is not None: - self.FQSN = file_obj.enc_scope_name.lower() + "::" + self.name.lower() + if file_ast.enc_scope_name is not None: + self.FQSN = file_ast.enc_scope_name.lower() + "::" + self.name.lower() else: self.FQSN = self.name.lower() @@ -531,7 +487,8 @@ def check_definitions(self, obj_tree): line_number, message='Variable "{0}" declared twice in scope'.format(child.name), severity=1, find_word=child.name ) - new_diag.add_related(path=self.file.path, line=FQSN_dict[child.FQSN], message='First declaration') + new_diag.add_related(path=self.file_ast.path, line=FQSN_dict[child.FQSN], + message='First declaration') errors.append(new_diag) continue # Check for masking from parent scope in subroutines, functions, and blocks @@ -600,7 +557,7 @@ def add_subroutine(self, interface_string, no_contains=False): }, "newText": interface_string + "\n" }) - return self.file.path, edits + return self.file_ast.path, edits class fortran_module(fortran_scope): @@ -622,8 +579,8 @@ def get_desc(self): class fortran_submodule(fortran_module): - def __init__(self, file_obj, line_number, name, ancestor_name=None): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name, ancestor_name=None): + self.base_setup(file_ast, line_number, name) self.ancestor_name = ancestor_name self.ancestor_obj = None @@ -667,8 +624,8 @@ def resolve_link(self, obj_tree): class fortran_subroutine(fortran_scope): - def __init__(self, file_obj, line_number, name, args="", mod_sub=False, keywords=[]): - self.base_setup(file_obj, line_number, name, keywords=keywords) + def __init__(self, file_ast, line_number, name, args="", mod_sub=False, keywords=[]): + self.base_setup(file_ast, line_number, name, keywords=keywords) self.args = args.replace(' ', '').lower() self.args_snip = self.args self.arg_objs = [] @@ -856,9 +813,9 @@ def get_diagnostics(self): class fortran_function(fortran_subroutine): - def __init__(self, file_obj, line_number, name, args="", + def __init__(self, file_ast, line_number, name, args="", mod_fun=False, keywords=[], return_type=None, result_var=None): - self.base_setup(file_obj, line_number, name, keywords=keywords) + self.base_setup(file_ast, line_number, name, keywords=keywords) self.args = args.replace(' ', '').lower() self.args_snip = self.args self.arg_objs = [] @@ -970,8 +927,8 @@ def get_interface(self, name_replace=None, change_arg=-1, change_strings=None): class fortran_type(fortran_scope): - def __init__(self, file_obj, line_number, name, keywords): - self.base_setup(file_obj, line_number, name, keywords=keywords) + def __init__(self, file_ast, line_number, name, keywords): + self.base_setup(file_ast, line_number, name, keywords=keywords) # self.in_children = [] self.inherit = None @@ -1067,7 +1024,7 @@ def get_actions(self, sline, eline): # diagnostics = [] has_edits = False - file_uri = path_to_uri(self.file.path) + file_uri = path_to_uri(self.file_ast.path) for in_child in self.in_children: if in_child.keywords.count(KEYWORD_ID_DICT['deferred']) > 0: # Get interface @@ -1078,7 +1035,7 @@ def get_actions(self, sline, eline): if interface_string is None: continue interface_path, interface_edits = self.parent.add_subroutine(interface_string, no_contains=has_edits) - if interface_path != self.file.path: + if interface_path != self.file_ast.path: continue 
edits.append({ "range": { @@ -1112,8 +1069,8 @@ def get_actions(self, sline, eline): class fortran_block(fortran_scope): - def __init__(self, file_obj, line_number, name): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name): + self.base_setup(file_ast, line_number, name) def get_type(self): return BLOCK_TYPE_ID @@ -1129,8 +1086,8 @@ def req_named_end(self): class fortran_do(fortran_block): - def __init__(self, file_obj, line_number, name): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name): + self.base_setup(file_ast, line_number, name) def get_type(self): return DO_TYPE_ID @@ -1140,8 +1097,8 @@ def get_desc(self): class fortran_where(fortran_block): - def __init__(self, file_obj, line_number, name): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name): + self.base_setup(file_ast, line_number, name) def get_type(self): return WHERE_TYPE_ID @@ -1151,8 +1108,8 @@ def get_desc(self): class fortran_if(fortran_block): - def __init__(self, file_obj, line_number, name): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name): + self.base_setup(file_ast, line_number, name) def get_type(self): return IF_TYPE_ID @@ -1162,8 +1119,8 @@ def get_desc(self): class fortran_associate(fortran_block): - def __init__(self, file_obj, line_number, name): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name): + self.base_setup(file_ast, line_number, name) def get_type(self): return ASSOC_TYPE_ID @@ -1173,8 +1130,8 @@ def get_desc(self): class fortran_enum(fortran_block): - def __init__(self, file_obj, line_number, name): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name): + self.base_setup(file_ast, line_number, name) def get_type(self): return ENUM_TYPE_ID @@ -1184,8 +1141,8 @@ def get_desc(self): class fortran_select(fortran_block): - def __init__(self, file_obj, line_number, name, select_info): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name, select_info): + self.base_setup(file_ast, line_number, name) self.select_type = select_info[0] self.binding_name = None self.bound_var = None @@ -1200,10 +1157,10 @@ def __init__(self, file_obj, line_number, name, select_info): elif self.select_type == 3: self.binding_type = select_info[1] # Close previous "TYPE IS" region if open - if (file_obj.current_scope is not None) \ - and (file_obj.current_scope.get_type() == SELECT_TYPE_ID)\ - and file_obj.current_scope.is_type_region(): - file_obj.end_scope(line_number) + if (file_ast.current_scope is not None) \ + and (file_ast.current_scope.get_type() == SELECT_TYPE_ID)\ + and file_ast.current_scope.is_type_region(): + file_ast.end_scope(line_number) def get_type(self): return SELECT_TYPE_ID @@ -1217,7 +1174,7 @@ def is_type_binding(self): def is_type_region(self): return ((self.select_type == 3) or (self.select_type == 4)) - def create_binding_variable(self, file_obj, line_number, var_desc, case_type): + def create_binding_variable(self, file_ast, line_number, var_desc, case_type): if self.parent.get_type() != SELECT_TYPE_ID: return None binding_name = None @@ -1230,15 +1187,15 @@ def create_binding_variable(self, file_obj, line_number, var_desc, case_type): bound_var = None # Create variable if binding_name is not None: - return fortran_var(file_obj, line_number, binding_name, var_desc, [], link_obj=bound_var) + 
return fortran_var(file_ast, line_number, binding_name, var_desc, [], link_obj=bound_var) elif (binding_name is None) and (bound_var is not None): - return fortran_var(file_obj, line_number, bound_var, var_desc, []) + return fortran_var(file_ast, line_number, bound_var, var_desc, []) return None class fortran_int(fortran_scope): - def __init__(self, file_obj, line_number, name, abstract=False): - self.base_setup(file_obj, line_number, name) + def __init__(self, file_ast, line_number, name, abstract=False): + self.base_setup(file_ast, line_number, name) self.mems = [] self.abstract = abstract self.external = name.startswith('#GEN_INT') and (not abstract) @@ -1271,14 +1228,14 @@ def resolve_link(self, obj_tree): class fortran_var(fortran_obj): - def __init__(self, file_obj, line_number, name, var_desc, keywords, + def __init__(self, file_ast, line_number, name, var_desc, keywords, keyword_info={}, link_obj=None): - self.base_setup(file_obj, line_number, name, var_desc, keywords, + self.base_setup(file_ast, line_number, name, var_desc, keywords, keyword_info, link_obj) - def base_setup(self, file_obj, line_number, name, var_desc, keywords, + def base_setup(self, file_ast, line_number, name, var_desc, keywords, keyword_info, link_obj): - self.file = file_obj + self.file_ast = file_ast self.sline = line_number self.eline = line_number self.name = name @@ -1296,8 +1253,8 @@ def base_setup(self, file_obj, line_number, name, var_desc, keywords, self.link_name = link_obj.lower() else: self.link_name = None - if file_obj.enc_scope_name is not None: - self.FQSN = file_obj.enc_scope_name.lower() + "::" + self.name.lower() + if file_ast.enc_scope_name is not None: + self.FQSN = file_ast.enc_scope_name.lower() + "::" + self.name.lower() else: self.FQSN = self.name.lower() if self.keywords.count(KEYWORD_ID_DICT['public']) > 0: @@ -1407,9 +1364,9 @@ def check_definition(self, obj_tree, known_types={}, interface=False): class fortran_meth(fortran_var): - def __init__(self, file_obj, line_number, name, var_desc, keywords, + def __init__(self, file_ast, line_number, name, var_desc, keywords, keyword_info, link_obj=None): - self.base_setup(file_obj, line_number, name, var_desc, keywords, + self.base_setup(file_ast, line_number, name, var_desc, keywords, keyword_info, link_obj) self.drop_arg = -1 self.pass_name = keyword_info.get('pass') @@ -1692,7 +1649,7 @@ def get_object(self, FQSN): return curr_obj def resolve_includes(self, workspace, path=None): - file_dir = os.path.dirname(self.file.path) + file_dir = os.path.dirname(self.path) for include_path in self.include_stmnts: file_path = os.path.normpath(os.path.join(file_dir, include_path[1])) if path is not None: @@ -1754,10 +1711,10 @@ def check_file(self, obj_tree): message = 'Unexpected end of scope at line {0}'.format(error[0]) else: message = 'Unexpected end statement: No open scopes' - errors.append(build_diagnostic(error[1]-1, message=message, severity=1)) + errors.append(fortran_diagnostic(error[1]-1, message=message, severity=1)) for scope in tmp_list: if not scope.check_valid_parent(): - errors.append(build_diagnostic( + errors.append(fortran_diagnostic( scope.sline-1, message='Invalid parent for "{0}" declaration'.format(scope.get_desc()), severity=1 )) diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 83ef231..56be755 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -313,6 +313,16 @@ def separate_def_list(test_str): return def_list +def find_word_in_line(line, word): + """Find Fortran word in line""" + i0 = 
-1 + for poss_name in WORD_REGEX.finditer(line): + if poss_name.group() == word: + i0 = poss_name.start() + break + return i0, i0 + len(word) + + def find_paren_match(test_str): """Find matching closing parenthesis by searching forward, returns -1 if no match is found""" @@ -984,6 +994,29 @@ def strip_comment(self, line): line = line.split('!')[0] return line + def find_word_in_code_line(self, line_number, find_word, forward=True, backward=False, pp_content=False): + back_lines, curr_line, forward_lines = self.get_code_line( + line_number, forward=forward, backward=backward, pp_content=pp_content + ) + i0 = i1 = -1 + if curr_line is not None: + find_word_lower = find_word.lower() + i0, i1 = find_word_in_line(curr_line.lower(), find_word_lower) + if backward and (i0 < 0): + back_lines.reverse() + for (i, line) in enumerate(back_lines): + i0, i1 = find_word_in_line(line.lower(), find_word_lower) + if i0 >= 0: + line_number -= i+1 + return line_number, i0, i1 + if forward and (i0 < 0): + for (i, line) in enumerate(forward_lines): + i0, i1 = find_word_in_line(line.lower(), find_word_lower) + if i0 >= 0: + line_number += i+1 + return line_number, i0, i1 + return line_number, i0, i1 + def preprocess(self, pp_defs=None, debug=False): # Look for and mark excluded preprocessor paths in file # Initial implementation only looks for "if" and "ifndef" statements. From 36d5793571f905eda492211fdcdefee9216955a2 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sat, 23 Mar 2019 12:26:35 -0400 Subject: [PATCH 16/44] Release version 1.7.0 --- CHANGELOG.md | 17 +++++++++++++++++ README.rst | 2 +- fortls/__init__.py | 2 +- setup.py | 4 ++-- 4 files changed, 21 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc968a8..4ba149c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,20 @@ +## 1.7.0 + +### Improvements +* Add initial support for "textDocument/codeAction" requests, generate unimplemented deferred procedures +* Show subroutine/function keywords ("PURE", "ELEMENTAL", etc.) 
+* Add position of object in line to "textDocument/definition" and "textDocument/implementation" results +* Diagnostics: CONTAINS statement placement errors +* Diagnostics: Visibility statement placement errors +* Command line options: Notify when workspace initialization is complete +* Command line options: Set number of threads used during initialization +* Significant refactoring of core code + +### Fixes +* Fix "RecursionError" exception with circular user-defined type references, fixes [#100](https://github.com/hansec/fortran-language-server/issues/100) +* Fix bug detecting TYPE definitions with an immediately following colon, ref [#100](https://github.com/hansec/fortran-language-server/issues/100) +* Fix incorrect diagnostics for interface statements with USE instead of IMPORT statements + ## 1.6.0 ### Improvements diff --git a/README.rst b/README.rst index 7a60ac9..b442016 100644 --- a/README.rst +++ b/README.rst @@ -45,7 +45,7 @@ Language Server Features - Use of unimported variables/objects in interface blocks - Statement placement errors ("CONTAINS", "IMPLICIT", "IMPORT") -- Code actions (``textDocument/codeAction``) [Experimental, must be enabled in settings] +- Code actions (``textDocument/codeAction``) [Experimental] - Generate type-bound procedures and implementation templates for deferred procedures diff --git a/fortls/__init__.py b/fortls/__init__.py index 19891e0..45aef91 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -6,7 +6,7 @@ from .langserver import LangServer from .jsonrpc import JSONRPC2Connection, ReadWriter, path_from_uri from .parse_fortran import fortran_file, process_file -__version__ = '1.6.0' +__version__ = '1.7.0' def error_exit(error_str): diff --git a/setup.py b/setup.py index 308ed1b..689804e 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version='1.6.0', + version='1.7.0', description='FORTRAN Language Server for the Language Server Protocol', @@ -17,7 +17,7 @@ # The project's main homepage. 
url='https://github.com/hansec/fortran-language-server', - download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.6.0.tar.gz', + download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.0.tar.gz', author='Chris Hansen', author_email = 'hansec@uw.edu', From c8209ab9cd467afb76e9e29a408a309b078da5f6 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sat, 23 Mar 2019 17:27:02 -0400 Subject: [PATCH 17/44] Fix bug with completion and signatureHelp requests on continuation lines introduced in v1.7 --- fortls/langserver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fortls/langserver.py b/fortls/langserver.py index 9922265..3842345 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -464,7 +464,7 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False ac_line = params["position"]["line"] ac_char = params["position"]["character"] # Get full line (and possible continuations) from file - pre_lines, curr_line, _ = file_obj.get_code_line(ac_line, backward=False, strip_comment=True) + pre_lines, curr_line, _ = file_obj.get_code_line(ac_line, forward=False, strip_comment=True) line_prefix = get_line_prefix(pre_lines, curr_line, ac_char) is_member = False try: @@ -662,7 +662,7 @@ def check_optional(arg, params): sig_line = params["position"]["line"] sig_char = params["position"]["character"] # Get full line (and possible continuations) from file - pre_lines, curr_line, _ = file_obj.get_code_line(sig_line, backward=False, strip_comment=True) + pre_lines, curr_line, _ = file_obj.get_code_line(sig_line, forward=False, strip_comment=True) line_prefix = get_line_prefix(pre_lines, curr_line, sig_char) # Test if scope declaration or end statement if SCOPE_DEF_REGEX.match(curr_line) or END_REGEX.match(curr_line): From 10ed6080ac23b91e31592d2b3ca3e3119f5ce1ca Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sat, 23 Mar 2019 17:28:02 -0400 Subject: [PATCH 18/44] Fix out-of-range error with various requests on zero-length lines introduced in v1.7 --- fortls/langserver.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fortls/langserver.py b/fortls/langserver.py index 3842345..b4561da 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -41,8 +41,8 @@ def init_file(filepath, pp_defs): def get_line_prefix(pre_lines, curr_line, iChar): - # Get full line (and possible continuations) from file - if (curr_line is None) or (iChar > len(curr_line)) or (curr_line[0] == '#'): + """Get code line prefix from current line and preceeding continuation lines""" + if (curr_line is None) or (iChar > len(curr_line)) or (curr_line.startswith('#')): return None prepend_string = ''.join(pre_lines) curr_line = prepend_string + curr_line @@ -466,6 +466,8 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False # Get full line (and possible continuations) from file pre_lines, curr_line, _ = file_obj.get_code_line(ac_line, forward=False, strip_comment=True) line_prefix = get_line_prefix(pre_lines, curr_line, ac_char) + if line_prefix is None: + return req_dict is_member = False try: var_stack = get_var_stack(line_prefix) @@ -594,6 +596,8 @@ def get_definition(self, def_file, def_line, def_char): # Get full line (and possible continuations) from file pre_lines, curr_line, _ = def_file.get_code_line(def_line, forward=False, strip_comment=True) line_prefix = get_line_prefix(pre_lines, curr_line, def_char) + if line_prefix is None: + return None is_member = False try: 
var_stack = get_var_stack(line_prefix) @@ -664,6 +668,8 @@ def check_optional(arg, params): # Get full line (and possible continuations) from file pre_lines, curr_line, _ = file_obj.get_code_line(sig_line, forward=False, strip_comment=True) line_prefix = get_line_prefix(pre_lines, curr_line, sig_char) + if line_prefix is None: + return req_dict # Test if scope declaration or end statement if SCOPE_DEF_REGEX.match(curr_line) or END_REGEX.match(curr_line): return req_dict From 3ed4990810043ef000faeed156796bdabbe0d6e9 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sat, 23 Mar 2019 17:30:13 -0400 Subject: [PATCH 19/44] Release version 1.7.1 --- CHANGELOG.md | 6 ++++++ fortls/__init__.py | 2 +- setup.py | 4 ++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ba149c..fa96dca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## 1.7.1 + +### Fixes +* Fix bug with completion and signatureHelp requests on continuation lines (introduced in v1.7) +* Fix out-of-range error with various requests on zero-length lines (introduced in v1.7) + ## 1.7.0 ### Improvements diff --git a/fortls/__init__.py b/fortls/__init__.py index 45aef91..328d60a 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -6,7 +6,7 @@ from .langserver import LangServer from .jsonrpc import JSONRPC2Connection, ReadWriter, path_from_uri from .parse_fortran import fortran_file, process_file -__version__ = '1.7.0' +__version__ = '1.7.1' def error_exit(error_str): diff --git a/setup.py b/setup.py index 689804e..95bd7aa 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version='1.7.0', + version='1.7.1', description='FORTRAN Language Server for the Language Server Protocol', @@ -17,7 +17,7 @@ # The project's main homepage. 
url='https://github.com/hansec/fortran-language-server', - download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.0.tar.gz', + download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.1.tar.gz', author='Chris Hansen', author_email = 'hansec@uw.edu', From 4e5ed8bd71753bfe6c921e05a878a8db55af4d41 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sat, 23 Mar 2019 17:50:00 -0400 Subject: [PATCH 20/44] Fix bug with various requests involving intrinsic functions/modules/variables introduced in v1.7 --- fortls/intrinsics.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/fortls/intrinsics.py b/fortls/intrinsics.py index a357f1b..a9ee36d 100644 --- a/fortls/intrinsics.py +++ b/fortls/intrinsics.py @@ -1,9 +1,8 @@ import os import json -from fortls.parse_fortran import fortran_file -from fortls.objects import fortran_module, fortran_subroutine, \ +from fortls.objects import fortran_ast, fortran_module, fortran_subroutine, \ fortran_function, fortran_type, fortran_var, fortran_obj, map_keywords -none_file = fortran_file() +none_ast = fortran_ast() lowercase_intrinsics = False @@ -19,7 +18,7 @@ def __init__(self, name, type, doc_str=None, args="", parent=None): self.doc_str = doc_str self.args = args.replace(' ', '') self.parent = parent - self.file = none_file + self.file_ast = none_ast if lowercase_intrinsics: self.name = self.name.lower() self.args = self.args.lower() @@ -93,9 +92,9 @@ def create_int_object(name, json_obj, type): def create_object(json_obj, enc_obj=None): if enc_obj is not None: - none_file.enc_scope_name = enc_obj.FQSN + none_ast.enc_scope_name = enc_obj.FQSN else: - none_file.enc_scope_name = None + none_ast.enc_scope_name = None if "mods" in json_obj: keywords, keyword_info = map_keywords(json_obj["mods"]) else: @@ -107,19 +106,19 @@ def create_object(json_obj, enc_obj=None): name = name.lower() args = args.lower() if json_obj["type"] == 0: - mod_tmp = fortran_module(none_file, 0, name) + mod_tmp = fortran_module(none_ast, 0, name) if "use" in json_obj: mod_tmp.add_use(json_obj["use"], 0) return mod_tmp elif json_obj["type"] == 1: - return fortran_subroutine(none_file, 0, name, args=args) + return fortran_subroutine(none_ast, 0, name, args=args) elif json_obj["type"] == 2: - return fortran_function(none_file, 0, name, + return fortran_function(none_ast, 0, name, args=args, return_type=[json_obj["return"], keywords, keyword_info]) elif json_obj["type"] == 3: - return fortran_var(none_file, 0, name, json_obj["desc"], keywords, keyword_info) + return fortran_var(none_ast, 0, name, json_obj["desc"], keywords, keyword_info) elif json_obj["type"] == 4: - return fortran_type(none_file, 0, name, keywords) + return fortran_type(none_ast, 0, name, keywords) else: raise ValueError From c3b06a1ae3bc68fce53de39dce94f3df293f1fc9 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sun, 24 Mar 2019 10:39:07 -0400 Subject: [PATCH 21/44] Release version 1.7.2 --- CHANGELOG.md | 5 +++++ fortls/__init__.py | 2 +- setup.py | 4 ++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa96dca..25c560f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +## 1.7.2 + +### Fixes +* Fix bug with definition/hover requests involving intrinsic functions/modules/variables (introduced in v1.7) + ## 1.7.1 ### Fixes diff --git a/fortls/__init__.py b/fortls/__init__.py index 328d60a..ef3f065 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -6,7 +6,7 @@ from 
.langserver import LangServer from .jsonrpc import JSONRPC2Connection, ReadWriter, path_from_uri from .parse_fortran import fortran_file, process_file -__version__ = '1.7.1' +__version__ = '1.7.2' def error_exit(error_str): diff --git a/setup.py b/setup.py index 95bd7aa..fd55b95 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version='1.7.1', + version='1.7.2', description='FORTRAN Language Server for the Language Server Protocol', @@ -17,7 +17,7 @@ # The project's main homepage. url='https://github.com/hansec/fortran-language-server', - download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.1.tar.gz', + download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.2.tar.gz', author='Chris Hansen', author_email = 'hansec@uw.edu', From f2b4dbe13d1ce5d4a1f3b97fee57263eeb7ece8c Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Mon, 25 Mar 2019 20:48:24 -0400 Subject: [PATCH 22/44] Fix bug in diagnostics construction/reporting introduced in v1.7 --- fortls/objects.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index 6b0ab74..63225d6 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -503,7 +503,7 @@ def check_definitions(self, obj_tree): line_number, message='Variable "{0}" masks variable in parent scope'.format(child.name), severity=2, find_word=child.name ) - new_diag.add_related(path=parent_var.file.path, line=parent_var.sline-1, + new_diag.add_related(path=parent_var.file_ast.path, line=parent_var.sline-1, message='First declaration') errors.append(new_diag) return errors @@ -1002,7 +1002,7 @@ def get_diagnostics(self): self.eline - 1, 'Deferred procedure "{0}" not implemented'.format(in_child.name), severity=1 ) - new_diag.add_related(path=in_child.file.path, line=in_child.sline-1, + new_diag.add_related(path=in_child.file_ast.path, line=in_child.sline-1, message='Inherited procedure declaration') errors.append(new_diag) return errors @@ -1049,7 +1049,7 @@ def get_actions(self, sline, eline): line_number, 'Deferred procedure "{0}" not implemented'.format(in_child.name), severity=1 ) - new_diag.add_related(path=in_child.file.path, line=in_child.sline-1, + new_diag.add_related(path=in_child.file_ast.path, line=in_child.sline-1, message='Inherited procedure declaration') diagnostics.append(new_diag) has_edits = True @@ -1353,7 +1353,8 @@ def check_definition(self, obj_tree, known_types={}, interface=False): severity=1, find_word=desc_obj_name ) type_def = type_info[1] - out_diag.add_related(path=type_def.file.path, line=type_def.sline-1, message='Possible object') + out_diag.add_related(path=type_def.file_ast.path, + line=type_def.sline-1, message='Possible object') else: out_diag = fortran_diagnostic( self.sline-1, message='Object "{0}" not imported in interface'.format(desc_obj_name), @@ -1694,11 +1695,12 @@ def close_file(self, line_number): def check_file(self, obj_tree): errors = [] + diagnostics = [] tmp_list = self.scope_list[:] if self.none_scope is not None: tmp_list += [self.none_scope] for error in self.parse_errors: - errors.append({ + diagnostics.append({ "range": { "start": {"line": error["line"]-1, "character": error["schar"]}, "end": {"line": error["line"]-1, "character": error["echar"]} @@ -1721,7 +1723,6 @@ def check_file(self, obj_tree): errors += 
scope.check_use(obj_tree) errors += scope.check_definitions(obj_tree) errors += scope.get_diagnostics() - diagnostics = [] for error in errors: diagnostics.append(error.build(self.file)) return diagnostics From 0f24562156a636f0c3a59538b15cbde90e7fd89e Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Tue, 26 Mar 2019 18:01:31 -0400 Subject: [PATCH 23/44] Fix incorrect "CONTAINS" diagnostic errors with procedure pointers --- fortls/objects.py | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index 63225d6..f304148 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -288,7 +288,7 @@ def resolve_inherit(self, obj_tree): def resolve_link(self, obj_tree): return None - def get_type(self): + def get_type(self, no_link=False): return -1 def get_desc(self): @@ -451,6 +451,7 @@ def check_definitions(self, obj_tree): FQSN_dict[child.FQSN] = child.sline - 1 # contains_line = -1 + after_contains_list = (SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID) if self.get_type() in (MODULE_TYPE_ID, SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID): if self.contains_start is None: contains_line = self.eline @@ -471,7 +472,7 @@ def check_definitions(self, obj_tree): if def_error is not None: errors.append(def_error) # Detect contains errors - if (contains_line >= child.sline) and (child.get_type() in (SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID)): + if (contains_line >= child.sline) and (child.get_type(no_link=True) in after_contains_list): new_diag = fortran_diagnostic( line_number, message='Subroutine/Function definition before CONTAINS statement', severity=1 @@ -561,7 +562,7 @@ def add_subroutine(self, interface_string, no_contains=False): class fortran_module(fortran_scope): - def get_type(self): + def get_type(self, no_link=False): return MODULE_TYPE_ID def get_desc(self): @@ -690,7 +691,7 @@ def resolve_arg_link(self, obj_tree): def resolve_link(self, obj_tree): self.resolve_arg_link(obj_tree) - def get_type(self): + def get_type(self, no_link=False): return SUBROUTINE_TYPE_ID def get_snippet(self, name_replace=None, drop_arg=-1): @@ -859,7 +860,7 @@ def resolve_link(self, obj_tree): if child.name.lower() == result_var_lower: self.result_obj = child - def get_type(self): + def get_type(self, no_link=False): return FUNCTION_TYPE_ID def get_desc(self): @@ -939,7 +940,7 @@ def __init__(self, file_ast, line_number, name, keywords): if self.keywords.count(KEYWORD_ID_DICT['private']) > 0: self.vis = -1 - def get_type(self): + def get_type(self, no_link=False): return CLASS_TYPE_ID def get_desc(self): @@ -1072,7 +1073,7 @@ class fortran_block(fortran_scope): def __init__(self, file_ast, line_number, name): self.base_setup(file_ast, line_number, name) - def get_type(self): + def get_type(self, no_link=False): return BLOCK_TYPE_ID def get_desc(self): @@ -1089,7 +1090,7 @@ class fortran_do(fortran_block): def __init__(self, file_ast, line_number, name): self.base_setup(file_ast, line_number, name) - def get_type(self): + def get_type(self, no_link=False): return DO_TYPE_ID def get_desc(self): @@ -1100,7 +1101,7 @@ class fortran_where(fortran_block): def __init__(self, file_ast, line_number, name): self.base_setup(file_ast, line_number, name) - def get_type(self): + def get_type(self, no_link=False): return WHERE_TYPE_ID def get_desc(self): @@ -1111,7 +1112,7 @@ class fortran_if(fortran_block): def __init__(self, file_ast, line_number, name): self.base_setup(file_ast, line_number, name) - def get_type(self): + def get_type(self, no_link=False): 
return IF_TYPE_ID def get_desc(self): @@ -1122,7 +1123,7 @@ class fortran_associate(fortran_block): def __init__(self, file_ast, line_number, name): self.base_setup(file_ast, line_number, name) - def get_type(self): + def get_type(self, no_link=False): return ASSOC_TYPE_ID def get_desc(self): @@ -1133,7 +1134,7 @@ class fortran_enum(fortran_block): def __init__(self, file_ast, line_number, name): self.base_setup(file_ast, line_number, name) - def get_type(self): + def get_type(self, no_link=False): return ENUM_TYPE_ID def get_desc(self): @@ -1162,7 +1163,7 @@ def __init__(self, file_ast, line_number, name, select_info): and file_ast.current_scope.is_type_region(): file_ast.end_scope(line_number) - def get_type(self): + def get_type(self, no_link=False): return SELECT_TYPE_ID def get_desc(self): @@ -1200,7 +1201,7 @@ def __init__(self, file_ast, line_number, name, abstract=False): self.abstract = abstract self.external = name.startswith('#GEN_INT') and (not abstract) - def get_type(self): + def get_type(self, no_link=False): return INTERFACE_TYPE_ID def get_desc(self): @@ -1278,8 +1279,8 @@ def resolve_link(self, obj_tree): if link_obj is not None: self.link_obj = link_obj - def get_type(self): - if self.link_obj is not None: + def get_type(self, no_link=False): + if (not no_link) and (self.link_obj is not None): return self.link_obj.get_type() # Normal variable return VAR_TYPE_ID @@ -1392,8 +1393,8 @@ def get_snippet(self, name_replace=None, drop_arg=-1): return self.link_obj.get_snippet(name, self.drop_arg) return None, None - def get_type(self): - if self.link_obj is not None: + def get_type(self, no_link=False): + if (not no_link) and (self.link_obj is not None): return self.link_obj.get_type() # Generic return METH_TYPE_ID From b1633d4de5522f89d35588d60865f7282e798ed7 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sun, 31 Mar 2019 11:46:04 -0400 Subject: [PATCH 24/44] Fix rename requests for type-bound procedures without an explicit link statement (ie. 
"=>"), fixes #104 --- fortls/__init__.py | 4 +- fortls/langserver.py | 154 ++++++++++++++++++++++++++++++------------- test/test_server.py | 6 +- 3 files changed, 112 insertions(+), 52 deletions(-) diff --git a/fortls/__init__.py b/fortls/__init__.py index ef3f065..d96b51c 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -451,7 +451,7 @@ def main(): print('File: "{0}"'.format(path)) file_obj = s.workspace.get(path) if file_obj is not None: - file_contents = file_obj['contents'] + file_contents = file_obj.contents_split for change in result: start_line = change['range']['start']['line'] end_line = change['range']['end']['line'] @@ -463,7 +463,7 @@ def main(): line = file_contents[i] print(' - {0}'.format(line)) if i == start_line: - new_contents.append(line[:start_col] + args.debug_rename) + new_contents.append(line[:start_col] + change['newText']) if i == end_line: new_contents[-1] += line[end_col:] for line in new_contents: diff --git a/fortls/langserver.py b/fortls/langserver.py index b4561da..d2b1c5d 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -8,7 +8,7 @@ get_var_stack, climb_type_tree, expand_name, get_line_context from fortls.objects import find_in_scope, find_in_workspace, get_use_tree, \ set_keyword_ordering, MODULE_TYPE_ID, SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID, \ - CLASS_TYPE_ID, INTERFACE_TYPE_ID, SELECT_TYPE_ID + CLASS_TYPE_ID, INTERFACE_TYPE_ID, SELECT_TYPE_ID, METH_TYPE_ID from fortls.intrinsics import get_intrinsic_keywords, load_intrinsics, \ set_lowercase_intrinsics @@ -741,41 +741,20 @@ def check_optional(arg, params): } return req_dict - def serve_references(self, request): - # Get parameters from request - params = request["params"] - uri = params["textDocument"]["uri"] - def_line = params["position"]["line"] - def_char = params["position"]["character"] - path = path_from_uri(uri) - file_obj = self.workspace.get(path) - if file_obj is None: - return [] - # Find object - def_obj = self.get_definition(file_obj, def_line, def_char) - if def_obj is None: - return [] - # - restrict_file = None - type_mem = False - if def_obj.FQSN.count(":") > 2: - if def_obj.parent.get_type() == CLASS_TYPE_ID: - type_mem = True - else: - restrict_file = def_obj.file_ast.file - if restrict_file is None: - return [] + def get_all_references(self, def_obj, type_mem, file_obj=None): # Search through all files def_name = def_obj.name.lower() def_fqsn = def_obj.FQSN NAME_REGEX = re.compile(r'(?:\W|^)({0})(?:\W|$)'.format(def_name), re.I) - if restrict_file is None: + if file_obj is None: file_set = self.workspace.items() else: - file_set = ((restrict_file.path, restrict_file), ) + file_set = ((file_obj.path, file_obj), ) override_cache = [] - refs = [] - for filename, file_obj in sorted(file_set): + refs = {} + ref_objs = [] + for filename, file_obj in file_set: + file_refs = [] # Search through file line by line for (i, line) in enumerate(file_obj.contents_split): if len(line) == 0: @@ -801,17 +780,51 @@ def serve_references(self, request): and (line.count("=>") == 0): try: if var_def.link_obj is def_obj: + ref_objs.append(var_def) ref_match = True except: pass if ref_match: - refs.append({ - "uri": path_to_uri(filename), - "range": { - "start": {"line": i, "character": match.start(1)}, - "end": {"line": i, "character": match.end(1)} - } - }) + file_refs.append([i, match.start(1), match.end(1)]) + if len(file_refs) > 0: + refs[filename] = file_refs + return refs, ref_objs + + def serve_references(self, request): + # Get parameters from request + params = 
request["params"] + uri = params["textDocument"]["uri"] + def_line = params["position"]["line"] + def_char = params["position"]["character"] + path = path_from_uri(uri) + # Find object + file_obj = self.workspace.get(path) + if file_obj is None: + return None + def_obj = self.get_definition(file_obj, def_line, def_char) + if def_obj is None: + return None + # Determine global accesibility and type membership + restrict_file = None + type_mem = False + if def_obj.FQSN.count(":") > 2: + if def_obj.parent.get_type() == CLASS_TYPE_ID: + type_mem = True + else: + restrict_file = def_obj.file_ast.file + if restrict_file is None: + return None + all_refs, _ = self.get_all_references(def_obj, type_mem, file_obj=restrict_file) + refs = [] + for (filename, file_refs) in all_refs.items(): + for ref in file_refs: + refs.append({ + "uri": path_to_uri(filename), + "range": { + "start": {"line": ref[0], "character": ref[1]}, + "end": {"line": ref[0], "character": ref[2]} + } + }) return refs def serve_definition(self, request): @@ -821,10 +834,10 @@ def serve_definition(self, request): def_line = params["position"]["line"] def_char = params["position"]["character"] path = path_from_uri(uri) + # Find object file_obj = self.workspace.get(path) if file_obj is None: return None - # Find object var_obj = self.get_definition(file_obj, def_line, def_char) if var_obj is None: return None @@ -918,20 +931,67 @@ def serve_implementation(self, request): return None def serve_rename(self, request): - all_refs = self.serve_references(request) - if all_refs is None: + # Get parameters from request + params = request["params"] + uri = params["textDocument"]["uri"] + def_line = params["position"]["line"] + def_char = params["position"]["character"] + path = path_from_uri(uri) + # Find object + file_obj = self.workspace.get(path) + if file_obj is None: + return None + def_obj = self.get_definition(file_obj, def_line, def_char) + if def_obj is None: + return None + # Determine global accesibility and type membership + restrict_file = None + type_mem = False + if def_obj.FQSN.count(":") > 2: + if def_obj.parent.get_type() == CLASS_TYPE_ID: + type_mem = True + else: + restrict_file = def_obj.file_ast.file + if restrict_file is None: + return None + all_refs, ref_objs = self.get_all_references(def_obj, type_mem, file_obj=restrict_file) + if len(all_refs) == 0: self.post_message('Rename failed: No usages found to rename', type=2) return None - params = request["params"] + # Create rename changes new_name = params["newName"] changes = {} - for ref in all_refs: - if ref["uri"] not in changes: - changes[ref["uri"]] = [] - changes[ref["uri"]].append({ - "range": ref["range"], - "newText": new_name - }) + for (filename, file_refs) in all_refs.items(): + file_uri = path_to_uri(filename) + changes[file_uri] = [] + for ref in file_refs: + changes[file_uri].append({ + "range": { + "start": {"line": ref[0], "character": ref[1]}, + "end": {"line": ref[0], "character": ref[2]} + }, + "newText": new_name + }) + # Check for implicit procedure implementation naming + bind_obj = None + if def_obj.get_type(no_link=True) == METH_TYPE_ID: + _, curr_line, post_lines = def_obj.file_ast.file.get_code_line( + def_obj.sline-1, backward=False, strip_comment=True + ) + if curr_line is not None: + full_line = curr_line + ''.join(post_lines) + if full_line.find('=>') < 0: + bind_obj = def_obj + bind_change = "{0} => {1}".format(new_name, def_obj.name) + elif (len(ref_objs) > 0) and (ref_objs[0].get_type(no_link=True) == METH_TYPE_ID): + bind_obj = 
ref_objs[0] + bind_change = "{0} => {1}".format(ref_objs[0].name, new_name) + # Replace definition statement with explicit implementation naming + if bind_obj is not None: + def_uri = path_to_uri(bind_obj.file_ast.file.path) + for change in changes[def_uri]: + if change['range']['start']['line'] == bind_obj.sline-1: + change["newText"] = bind_change return {"changes": changes} def serve_codeActions(self, request): diff --git a/test/test_server.py b/test/test_server.py index 12dedfe..aca49e1 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -395,15 +395,15 @@ def check_return(result_array, checks): # free_path = os.path.join(test_dir, "subdir", "test_free.f90") check_return(results[1], ( + [21, 27, os.path.join(test_dir, "test_prog.f08")], + [5, 11, os.path.join(test_dir, "test_prog.f08")], [8, 14, free_path], [9, 15, free_path], [14, 20, free_path], [6, 12, free_path], [6, 12, free_path], [6, 12, free_path], - [6, 12, free_path], - [21, 27, os.path.join(test_dir, "test_prog.f08")], - [5, 11, os.path.join(test_dir, "test_prog.f08")] + [6, 12, free_path] )) From ea9e1e591c30695f87823029f049f8bcab83d610 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sun, 31 Mar 2019 12:15:07 -0400 Subject: [PATCH 25/44] Fix case preservation in hover requests, fixes #102 --- fortls/objects.py | 5 +++-- fortls/parse_fortran.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index f304148..bab383d 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -627,7 +627,7 @@ def resolve_link(self, obj_tree): class fortran_subroutine(fortran_scope): def __init__(self, file_ast, line_number, name, args="", mod_sub=False, keywords=[]): self.base_setup(file_ast, line_number, name, keywords=keywords) - self.args = args.replace(' ', '').lower() + self.args = args.replace(' ', '') self.args_snip = self.args self.arg_objs = [] self.in_children = [] @@ -666,6 +666,7 @@ def resolve_arg_link(self, obj_tree): if self.args == '': return arg_list = self.args.replace(' ', '').split(',') + arg_list_lower = self.args.lower().replace(' ', '').split(',') self.arg_objs = [None for arg in arg_list] check_objs = self.children for child in self.children: @@ -674,7 +675,7 @@ def resolve_arg_link(self, obj_tree): self.missing_args = [] for child in check_objs: ind = -1 - for (i, arg) in enumerate(arg_list): + for (i, arg) in enumerate(arg_list_lower): if arg == child.name.lower(): ind = i break diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 56be755..3b2be98 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -420,7 +420,7 @@ def read_var_def(line, type_word=None, fun_only=False): # kind_match = KIND_SPEC_REGEX.match(trailing_line) if kind_match is not None: - kind_str = kind_match.group(0).strip().lower() + kind_str = kind_match.group(0).strip() type_word += kind_str trailing_line = trailing_line[kind_match.end(0):] if kind_str[0] == '(': @@ -428,11 +428,12 @@ def read_var_def(line, type_word=None, fun_only=False): if match_char < 0: return None # Incomplete type spec else: - type_word += trailing_line[:match_char+1].strip().lower() + kind_word = trailing_line[:match_char+1].strip() + type_word += kind_word trailing_line = trailing_line[match_char+1:] else: # Class and Type statements need a kind spec - if type_word.lower() == 'class' or type_word.lower() == 'type': + if type_word.lower() in ('type', 'class'): return None # Make sure next character is space or comma or colon if not trailing_line[0] in (' ', ',', ':'): From 
daba4b7efe04772d7dc92c16b5e0e3458d7bfd7b Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Mon, 1 Apr 2019 20:58:29 -0400 Subject: [PATCH 26/44] Return copy of child list to prevent accidental modification --- fortls/objects.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index bab383d..e140873 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -437,7 +437,7 @@ def get_children(self, public_only=False): pub_children.append(child) return pub_children else: - return self.children + return copy.copy(self.children) def check_definitions(self, obj_tree): """Check for definition errors in scope""" @@ -668,7 +668,7 @@ def resolve_arg_link(self, obj_tree): arg_list = self.args.replace(' ', '').split(',') arg_list_lower = self.args.lower().replace(' ', '').split(',') self.arg_objs = [None for arg in arg_list] - check_objs = self.children + check_objs = copy.copy(self.children) for child in self.children: if child.is_external_int(): check_objs += child.get_children() @@ -1081,7 +1081,7 @@ def get_desc(self): return 'BLOCK' def get_children(self, public_only=False): - return self.children + return copy.copy(self.children) def req_named_end(self): return True From 37897b7230f6ba50e6042e3b991da362ee480163 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Mon, 1 Apr 2019 21:03:44 -0400 Subject: [PATCH 27/44] Release version 1.7.3 --- CHANGELOG.md | 9 +++++++++ fortls/__init__.py | 2 +- setup.py | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 25c560f..3801493 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +## 1.7.3 + +### Fixes +* Fix case preservation in hover requests, fixes [#102](https://github.com/hansec/fortran-language-server/issues/102) +* Fix rename requests for type-bound procedures without an explicit link statement (ie. "=>"), fixes [#104](https://github.com/hansec/fortran-language-server/issues/104) +* Fix incorrect "CONTAINS" diagnostic errors with procedure pointers and external interfaces +* Fix bug in diagnostic construction/reporting (introduced in v1.7) +* Fix bugs caused by accidental modification of child object lists + ## 1.7.2 ### Fixes diff --git a/fortls/__init__.py b/fortls/__init__.py index d96b51c..e93cee5 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -6,7 +6,7 @@ from .langserver import LangServer from .jsonrpc import JSONRPC2Connection, ReadWriter, path_from_uri from .parse_fortran import fortran_file, process_file -__version__ = '1.7.2' +__version__ = '1.7.3' def error_exit(error_str): diff --git a/setup.py b/setup.py index fd55b95..3aaf34b 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version='1.7.2', + version='1.7.3', description='FORTRAN Language Server for the Language Server Protocol', @@ -17,7 +17,7 @@ # The project's main homepage. 
url='https://github.com/hansec/fortran-language-server', - download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.2.tar.gz', + download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.3.tar.gz', author='Chris Hansen', author_email = 'hansec@uw.edu', From 0cbfae34d3ef4c642a0c67bde821aa8d749d26fa Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Tue, 2 Apr 2019 11:08:00 -0400 Subject: [PATCH 28/44] Remove ordering requirements for references regression test --- test/test_server.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/test/test_server.py b/test/test_server.py index aca49e1..1a596a6 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -378,11 +378,18 @@ def def_request(file_path, line, char): def test_refs(): def check_return(result_array, checks): + def find_in_results(uri, sline): + for (i, result) in enumerate(result_array): + if (result["uri"] == uri) and (result["range"]["start"]["line"] == sline): + del result_array[i] + return result + return None assert len(result_array) == len(checks) - for (i, check) in enumerate(checks): - assert result_array[i]["uri"] == path_to_uri(check[2]) - assert result_array[i]["range"]["start"]["character"] == check[0] - assert result_array[i]["range"]["end"]["character"] == check[1] + for check in checks: + result = find_in_results(path_to_uri(check[0]), check[1]) + assert (result is not None) + assert result["range"]["start"]["character"] == check[2] + assert result["range"]["end"]["character"] == check[3] # string = write_rpc_request(1, "initialize", {"rootPath": test_dir}) file_path = os.path.join(test_dir, "test_prog.f08") @@ -395,15 +402,15 @@ def check_return(result_array, checks): # free_path = os.path.join(test_dir, "subdir", "test_free.f90") check_return(results[1], ( - [21, 27, os.path.join(test_dir, "test_prog.f08")], - [5, 11, os.path.join(test_dir, "test_prog.f08")], - [8, 14, free_path], - [9, 15, free_path], - [14, 20, free_path], - [6, 12, free_path], - [6, 12, free_path], - [6, 12, free_path], - [6, 12, free_path] + [os.path.join(test_dir, "test_prog.f08"), 2, 21, 27], + [os.path.join(test_dir, "test_prog.f08"), 9, 5, 11], + [free_path, 8, 8, 14], + [free_path, 16, 9, 15], + [free_path, 18, 14, 20], + [free_path, 36, 6, 12], + [free_path, 44, 6, 12], + [free_path, 50, 6, 12], + [free_path, 76, 6, 12] )) From eef6764a441f9de75a7a89e76945a8aadf385293 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Mon, 25 Mar 2019 20:59:19 -0400 Subject: [PATCH 29/44] Add support for line length diagnostics --- fortls/__init__.py | 12 +++++++++++- fortls/langserver.py | 8 +++++++- fortls/objects.py | 4 +--- fortls/parse_fortran.py | 38 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 57 insertions(+), 5 deletions(-) diff --git a/fortls/__init__.py b/fortls/__init__.py index e93cee5..633e661 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -63,6 +63,14 @@ def main(): '--enable_code_actions', action="store_true", help="Enable experimental code actions (default: false)" ) + parser.add_argument( + '--max_line_length', type=int, default=-1, + help="Maximum line length (default: disabled)" + ) + parser.add_argument( + '--max_comment_line_length', type=int, default=-1, + help="Maximum comment line length (default: disabled)" + ) parser.add_argument( '--debug_log', action="store_true", help="Generate debug log in project root folder" @@ -154,7 +162,9 @@ def main(): "use_signature_help": args.use_signature_help, "variable_hover": 
args.variable_hover, "sort_keywords": (not args.preserve_keyword_order), - "enable_code_actions": (args.enable_code_actions or args.debug_actions) + "enable_code_actions": (args.enable_code_actions or args.debug_actions), + "max_line_length": args.max_line_length, + "max_comment_line_length": args.max_comment_line_length } # if args.debug_parser: diff --git a/fortls/langserver.py b/fortls/langserver.py index d2b1c5d..c743e5d 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -83,6 +83,8 @@ def __init__(self, conn, debug_log=False, settings={}): self.variable_hover = settings.get("variable_hover", False) self.sort_keywords = settings.get("sort_keywords", True) self.enable_code_actions = settings.get("enable_code_actions", False) + self.max_line_length = settings.get("max_line_length", -1) + self.max_comment_line_length = settings.get("max_comment_line_length", -1) # Set object settings set_keyword_ordering(self.sort_keywords) @@ -193,6 +195,9 @@ def serve_initialize(self, request): self.lowercase_intrinsics = config_dict.get("lowercase_intrinsics", self.lowercase_intrinsics) self.debug_log = config_dict.get("debug_log", self.debug_log) self.pp_defs = config_dict.get("pp_defs", {}) + self.max_line_length = config_dict.get("max_line_length", self.max_line_length) + self.max_comment_line_length = config_dict.get("max_comment_line_length", + self.max_comment_line_length) if isinstance(self.pp_defs, list): self.pp_defs = {key: "" for key in self.pp_defs} except: @@ -1041,7 +1046,8 @@ def get_diagnostics(self, uri): file_obj = self.workspace.get(filepath) if file_obj is not None: try: - diags = file_obj.ast.check_file(self.obj_tree) + diags = file_obj.check_file(self.obj_tree, max_line_length=self.max_line_length, + max_comment_line_length=self.max_comment_line_length) except Exception as e: return None, e else: diff --git a/fortls/objects.py b/fortls/objects.py index e140873..ec4a55e 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -1725,6 +1725,4 @@ def check_file(self, obj_tree): errors += scope.check_use(obj_tree) errors += scope.check_definitions(obj_tree) errors += scope.get_diagnostics() - for error in errors: - diagnostics.append(error.build(self.file)) - return diagnostics + return errors, diagnostics diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 3b2be98..1c77eac 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -1182,6 +1182,44 @@ def replace_vars(line): self.contents_pp = output_file return pp_skips, pp_defines + def check_file(self, obj_tree, max_line_length=-1, max_comment_line_length=-1): + diagnostics = [] + if (max_line_length > 0) or (max_comment_line_length > 0): + line_message = 'Line length exceeds "max_line_length" ({0})'.format(max_line_length) + comment_message = 'Comment line length exceeds "max_comment_line_length" ({0})'.format( + max_comment_line_length + ) + if self.fixed: + COMMENT_LINE_MATCH = FIXED_COMMENT_LINE_MATCH + else: + COMMENT_LINE_MATCH = FREE_COMMENT_LINE_MATCH + for (i, line) in enumerate(self.contents_split): + if COMMENT_LINE_MATCH.match(line) is None: + if (max_line_length > 0) and (len(line) > max_line_length): + diagnostics.append({ + "range": { + "start": {"line": i, "character": max_line_length}, + "end": {"line": i, "character": len(line)} + }, + "message": line_message, + "severity": 2 + }) + else: + if (max_comment_line_length > 0) and (len(line) > max_comment_line_length): + diagnostics.append({ + "range": { + "start": {"line": i, "character": max_comment_line_length}, + "end": 
{"line": i, "character": len(line)} + }, + "message": comment_message, + "severity": 2 + }) + errors, diags_ast = self.ast.check_file(obj_tree) + diagnostics += diags_ast + for error in errors: + diagnostics.append(error.build(self)) + return diagnostics + def_tests = [ read_var_def, read_sub_def, read_fun_def, read_block_def, From 28e37cc76559655baeec50cc9687e19be6f94cb8 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Sun, 31 Mar 2019 15:49:37 -0400 Subject: [PATCH 30/44] Add full support for ASSOCIATE statements, fixes #101 - Resolve links recursively into subroutines/functions --- fortls/langserver.py | 7 +- fortls/objects.py | 147 ++++++++++++++++++++++++++++++++++++++ fortls/parse_fortran.py | 152 ++++++---------------------------------- 3 files changed, 173 insertions(+), 133 deletions(-) diff --git a/fortls/langserver.py b/fortls/langserver.py index c743e5d..85dd326 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -5,10 +5,11 @@ # Local modules from fortls.jsonrpc import path_to_uri, path_from_uri from fortls.parse_fortran import fortran_file, process_file, get_paren_level, \ - get_var_stack, climb_type_tree, expand_name, get_line_context + expand_name, get_line_context from fortls.objects import find_in_scope, find_in_workspace, get_use_tree, \ - set_keyword_ordering, MODULE_TYPE_ID, SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID, \ - CLASS_TYPE_ID, INTERFACE_TYPE_ID, SELECT_TYPE_ID, METH_TYPE_ID + get_var_stack, climb_type_tree, set_keyword_ordering, MODULE_TYPE_ID, \ + SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID, CLASS_TYPE_ID, INTERFACE_TYPE_ID, \ + SELECT_TYPE_ID, METH_TYPE_ID from fortls.intrinsics import get_intrinsic_keywords, load_intrinsics, \ set_lowercase_intrinsics diff --git a/fortls/objects.py b/fortls/objects.py index ec4a55e..99c0559 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -4,6 +4,7 @@ from fortls.jsonrpc import path_to_uri CLASS_VAR_REGEX = re.compile(r'(TYPE|CLASS)[ ]*\(', re.I) DEF_KIND_REGEX = re.compile(r'([a-z]*)[ ]*\((?:KIND|LEN)?[ =]*([a-z_][a-z0-9_]*)', re.I) +OBJBREAK_REGEX = re.compile(r'[\/\-(.,+*<>=$: ]', re.I) # Keyword identifiers KEYWORD_LIST = [ 'pointer', @@ -208,6 +209,128 @@ def add_children(mod_obj, query): return matching_symbols +def get_paren_level(line): + """Get sub-string corresponding to a single parenthesis level, + via backward search up through the line. 
+ + Examples: + "CALL sub1(arg1,arg2" -> ("arg1,arg2", [[10, 19]]) + "CALL sub1(arg1(i),arg2" -> ("arg1,arg2", [[10, 14], [17, 22]]) + """ + if line == '': + return '', [[0, 0]] + level = 0 + in_string = False + string_char = "" + i1 = len(line) + sections = [] + for i in range(len(line)-1, -1, -1): + char = line[i] + if in_string: + if char == string_char: + in_string = False + continue + if (char == '(') or (char == '['): + level -= 1 + if level == 0: + i1 = i + elif level < 0: + sections.append([i+1, i1]) + break + elif (char == ')') or (char == ']'): + level += 1 + if level == 1: + sections.append([i+1, i1]) + elif (char == "'") or (char == '"'): + in_string = True + string_char = char + if level == 0: + sections.append([i, i1]) + sections.reverse() + out_string = "" + for section in sections: + out_string += line[section[0]:section[1]] + return out_string, sections + + +def get_var_stack(line): + """Get user-defined type field sequence terminating the given line + + Examples: + "myvar%foo%bar" -> ["myvar", "foo", "bar"] + "CALL self%method(this%foo" -> ["this", "foo"] + """ + if len(line) == 0: + return None + final_var, sections = get_paren_level(line) + if final_var == '': + return [''] + if final_var.find('%') < 0: + final_paren = sections[-1] + ntail = final_paren[1] - final_paren[0] + # + if ntail == 0: + final_var = '' + elif ntail > 0: + final_var = final_var[len(final_var)-ntail:] + # + if final_var is not None: + final_op_split = OBJBREAK_REGEX.split(final_var) + return final_op_split[-1].split('%') + else: + return None + + +def climb_type_tree(var_stack, curr_scope, obj_tree): + """Walk up user-defined type sequence to determine final field type""" + def get_type_name(var_obj): + type_desc = get_paren_substring(var_obj.get_desc()) + if type_desc is not None: + type_desc = type_desc.strip().lower() + return type_desc + # Find base variable in current scope + type_name = None + type_scope = None + iVar = 0 + var_name = var_stack[iVar].strip().lower() + var_obj = find_in_scope(curr_scope, var_name, obj_tree) + if var_obj is None: + return None + else: + type_name = get_type_name(var_obj) + curr_scope = var_obj.parent + # Search for type, then next variable in stack and so on + for _ in range(30): + # Find variable type in available scopes + if type_name is None: + break + type_scope = find_in_scope(curr_scope, type_name, obj_tree) + # Exit if not found + if type_scope is None: + break + curr_scope = type_scope.parent + # Go to next variable in stack and exit if done + iVar += 1 + if iVar == len(var_stack)-1: + break + # Find next variable by name in scope + var_name = var_stack[iVar].strip().lower() + var_obj = find_in_scope(type_scope, var_name, obj_tree) + # Set scope to declaration location if variable is inherited + if var_obj is not None: + curr_scope = var_obj.parent + if (var_obj.parent is not None) and (var_obj.parent.get_type() == CLASS_TYPE_ID): + for in_child in var_obj.parent.in_children: + if (in_child.name.lower() == var_name) and (in_child.parent is not None): + curr_scope = in_child.parent + type_name = get_type_name(var_obj) + else: + break + else: + raise KeyError + return type_scope + + class fortran_diagnostic: def __init__(self, sline, message, severity=1, find_word=None): self.sline = sline @@ -622,6 +745,9 @@ def resolve_link(self, obj_tree): prototype.resolve_link(obj_tree) child.copy_interface(prototype) break + # Recurse into children + for child in self.children: + child.resolve_link(obj_tree) class fortran_subroutine(fortran_scope): @@ -691,6 +817,8 
@@ def resolve_arg_link(self, obj_tree): def resolve_link(self, obj_tree): self.resolve_arg_link(obj_tree) + for child in self.children: + child.resolve_link(obj_tree) def get_type(self, no_link=False): return SUBROUTINE_TYPE_ID @@ -860,6 +988,8 @@ def resolve_link(self, obj_tree): for child in self.children: if child.name.lower() == result_var_lower: self.result_obj = child + for child in self.children: + child.resolve_link(obj_tree) def get_type(self, no_link=False): return FUNCTION_TYPE_ID @@ -1123,6 +1253,7 @@ def get_desc(self): class fortran_associate(fortran_block): def __init__(self, file_ast, line_number, name): self.base_setup(file_ast, line_number, name) + self.assoc_links = [] def get_type(self, no_link=False): return ASSOC_TYPE_ID @@ -1130,6 +1261,22 @@ def get_type(self, no_link=False): def get_desc(self): return 'ASSOCIATE' + def create_binding_variable(self, file_ast, line_number, bound_name, link_var): + new_var = fortran_var(file_ast, line_number, bound_name, 'UNKNOWN', []) + self.assoc_links.append([new_var, bound_name, link_var]) + return new_var + + def resolve_link(self, obj_tree): + for assoc_link in self.assoc_links: + var_stack = get_var_stack(assoc_link[2]) + if len(var_stack) > 1: + type_scope = climb_type_tree(var_stack, self, obj_tree) + if type_scope is None: + continue + var_obj = find_in_scope(type_scope, var_stack[-1], obj_tree) + if var_obj is not None: + assoc_link[0].link_obj = var_obj + class fortran_enum(fortran_block): def __init__(self, file_ast, line_number, name): diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 1c77eac..4134bcc 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -1,12 +1,12 @@ from __future__ import print_function import sys import re -from fortls.objects import get_paren_substring, map_keywords, find_in_scope, \ +from fortls.objects import get_paren_substring, map_keywords, get_paren_level, \ fortran_ast, fortran_module, fortran_program, fortran_submodule, \ fortran_subroutine, fortran_function, fortran_block, fortran_select, \ fortran_type, fortran_enum, fortran_int, fortran_var, fortran_meth, \ fortran_associate, fortran_do, fortran_where, fortran_if, \ - INTERFACE_TYPE_ID, SELECT_TYPE_ID, CLASS_TYPE_ID + INTERFACE_TYPE_ID, SELECT_TYPE_ID PY3K = sys.version_info >= (3, 0) if not PY3K: import io @@ -69,7 +69,6 @@ TATTR_LIST_REGEX = re.compile(r'[ ]*,[ ]*(PUBLIC|PRIVATE|ABSTRACT|EXTENDS\([a-z0-9_]*\))', re.I) VIS_REGEX = re.compile(r'[ ]*(PUBLIC|PRIVATE)', re.I) WORD_REGEX = re.compile(r'[a-z_][a-z0-9_]*', re.I) -OBJBREAK_REGEX = re.compile(r'[\/\-(.,+*<>=$: ]', re.I) SUB_PAREN_MATCH = re.compile(r'\([a-z0-9_, ]*\)', re.I) KIND_SPEC_MATCH = re.compile(r'\([a-z0-9_, =*]*\)', re.I) SQ_STRING_REGEX = re.compile(r'\'[^\']*\'', re.I) @@ -100,34 +99,6 @@ END_REGEX = re.compile(r'[ ]*(END)( |MODULE|PROGRAM|SUBROUTINE|FUNCTION|TYPE|DO|IF|SELECT)?', re.I) -def get_var_stack(line): - """Get user-defined type field sequence terminating the given line - - Examples: - "myvar%foo%bar" -> ["myvar", "foo", "bar"] - "CALL self%method(this%foo" -> ["this", "foo"] - """ - if len(line) == 0: - return None - final_var, sections = get_paren_level(line) - if final_var == '': - return [''] - if final_var.find('%') < 0: - final_paren = sections[-1] - ntail = final_paren[1] - final_paren[0] - # - if ntail == 0: - final_var = '' - elif ntail > 0: - final_var = final_var[len(final_var)-ntail:] - # - if final_var is not None: - final_op_split = OBJBREAK_REGEX.split(final_var) - return final_op_split[-1].split('%') - 
else: - return None - - def expand_name(line, char_poss): """Get full word containing given cursor position""" for word_match in WORD_REGEX.finditer(line): @@ -136,56 +107,6 @@ def expand_name(line, char_poss): return '' -def climb_type_tree(var_stack, curr_scope, obj_tree): - """Walk up user-defined type sequence to determine final field type""" - def get_type_name(var_obj): - type_desc = get_paren_substring(var_obj.get_desc()) - if type_desc is not None: - type_desc = type_desc.strip().lower() - return type_desc - # Find base variable in current scope - type_name = None - type_scope = None - iVar = 0 - var_name = var_stack[iVar].strip().lower() - var_obj = find_in_scope(curr_scope, var_name, obj_tree) - if var_obj is None: - return None - else: - type_name = get_type_name(var_obj) - curr_scope = var_obj.parent - # Search for type, then next variable in stack and so on - for _ in range(30): - # Find variable type in available scopes - if type_name is None: - break - type_scope = find_in_scope(curr_scope, type_name, obj_tree) - # Exit if not found - if type_scope is None: - break - curr_scope = type_scope.parent - # Go to next variable in stack and exit if done - iVar += 1 - if iVar == len(var_stack)-1: - break - # Find next variable by name in scope - var_name = var_stack[iVar].strip().lower() - var_obj = find_in_scope(type_scope, var_name, obj_tree) - # Set scope to declaration location if variable is inherited - if var_obj is not None: - curr_scope = var_obj.parent - if (var_obj.parent is not None) and (var_obj.parent.get_type() == CLASS_TYPE_ID): - for in_child in var_obj.parent.in_children: - if (in_child.name.lower() == var_name) and (in_child.parent is not None): - curr_scope = in_child.parent - type_name = get_type_name(var_obj) - else: - break - else: - raise KeyError - return type_scope - - def get_line_context(line): """Get context of ending position in line (for completion)""" last_level, sections = get_paren_level(line) @@ -338,50 +259,6 @@ def find_paren_match(test_str): return ind -def get_paren_level(line): - """Get sub-string corresponding to a single parenthesis level, - via backward search up through the line. 
- - Examples: - "CALL sub1(arg1,arg2" -> ("arg1,arg2", [[10, 19]]) - "CALL sub1(arg1(i),arg2" -> ("arg1,arg2", [[10, 14], [17, 22]]) - """ - if line == '': - return '', [[0, 0]] - level = 0 - in_string = False - string_char = "" - i1 = len(line) - sections = [] - for i in range(len(line)-1, -1, -1): - char = line[i] - if in_string: - if char == string_char: - in_string = False - continue - if (char == '(') or (char == '['): - level -= 1 - if level == 0: - i1 = i - elif level < 0: - sections.append([i+1, i1]) - break - elif (char == ')') or (char == ']'): - level += 1 - if level == 1: - sections.append([i+1, i1]) - elif (char == "'") or (char == '"'): - in_string = True - string_char = char - if level == 0: - sections.append([i, i1]) - sections.reverse() - out_string = "" - for section in sections: - out_string += line[section[0]:section[1]] - return out_string, sections - - def parse_var_keywords(test_str): """Parse Fortran variable declaration keywords""" keyword_match = KEYWORD_LIST_REGEX.match(test_str) @@ -549,10 +426,6 @@ def read_block_def(line): else: return 'where', False # - assoc_match = ASSOCIATE_REGEX.match(line) - if assoc_match is not None: - return 'assoc', None - # if_match = IF_REGEX.match(line) if if_match is not None: then_match = THEN_REGEX.search(line_no_comment) @@ -561,6 +434,17 @@ def read_block_def(line): return None +def read_associate_def(line): + assoc_match = ASSOCIATE_REGEX.match(line) + if assoc_match is not None: + trailing_line = line[assoc_match.end(0):] + match_char = find_paren_match(trailing_line) + if match_char < 0: + return 'assoc', [] + var_words = separate_def_list(trailing_line[:match_char].strip()) + return 'assoc', var_words + + def read_select_def(line): """Attempt to read SELECT definition line""" select_match = SELECT_REGEX.match(line) @@ -1223,7 +1107,7 @@ def check_file(self, obj_tree, max_line_length=-1, max_comment_line_length=-1): def_tests = [ read_var_def, read_sub_def, read_fun_def, read_block_def, - read_select_def, read_type_def, read_enum_def, read_use_stmt, + read_associate_def, read_select_def, read_type_def, read_enum_def, read_use_stmt, read_int_def, read_generic_def, read_mod_def, read_prog_def, read_submod_def, read_inc_stmt, read_vis_stmnt ] @@ -1557,6 +1441,14 @@ def process_file(file_obj, close_open_scopes, debug=False, pp_defs=None): name = '#ASSOC{0}'.format(block_counter) new_assoc = fortran_associate(file_ast, line_number, name) file_ast.add_scope(new_assoc, END_ASSOCIATE_WORD, req_container=True) + for bound_var in obj: + binding_split = bound_var.split('=>') + if len(binding_split) == 2: + binding_name = binding_split[0].strip() + link_name = binding_split[1].strip() + file_ast.add_variable(new_assoc.create_binding_variable( + file_ast, line_number, binding_name, link_name + )) if(debug): print('{1} !!! 
ASSOCIATE statement({0})'.format(line_number, line.strip())) elif obj_type == 'if': From 6b71f1213f70b870bc5ed3337ea6b799c02a86fa Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Tue, 2 Apr 2019 11:13:54 -0400 Subject: [PATCH 31/44] Simplify completion and signature request results - Return suggestion array only (or None) for completion requests - Return None instead of empty list in signature requests --- fortls/__init__.py | 13 ++++++++----- fortls/langserver.py | 35 +++++++++++++++-------------------- test/test_server.py | 6 +++--- 3 files changed, 26 insertions(+), 28 deletions(-) diff --git a/fortls/__init__.py b/fortls/__init__.py index 633e661..b782663 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -314,9 +314,12 @@ def main(): "position": {"line": args.debug_line-1, "character": args.debug_char-1} } }) - print(' Results:') - for obj in completion_results['items']: - print(' {0}: {1} -> {2}'.format(obj['kind'], obj['label'], obj['detail'])) + if completion_results is None: + print(' No results!') + else: + print(' Results:') + for obj in completion_results: + print(' {0}: {1} -> {2}'.format(obj['kind'], obj['label'], obj['detail'])) # if args.debug_signature: print('\nTesting "textDocument/signatureHelp" request:') @@ -332,8 +335,8 @@ def main(): "position": {"line": args.debug_line-1, "character": args.debug_char-1} } }) - if len(signature_results['signatures']) == 0: - print(' No Results') + if signature_results is None: + print(' No Results!') else: print(' Results:') active_param = signature_results.get('activeParameter', 0) diff --git a/fortls/langserver.py b/fortls/langserver.py index 85dd326..1e38485 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -459,13 +459,12 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False comp_obj["documentation"] = doc_str return comp_obj # Get parameters from request - req_dict = {"isIncomplete": False, "items": []} params = request["params"] uri = params["textDocument"]["uri"] path = path_from_uri(uri) file_obj = self.workspace.get(path) if file_obj is None: - return req_dict + return None # Check line ac_line = params["position"]["line"] ac_char = params["position"]["character"] @@ -473,14 +472,14 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False pre_lines, curr_line, _ = file_obj.get_code_line(ac_line, forward=False, strip_comment=True) line_prefix = get_line_prefix(pre_lines, curr_line, ac_char) if line_prefix is None: - return req_dict + return None is_member = False try: var_stack = get_var_stack(line_prefix) is_member = (len(var_stack) > 1) var_prefix = var_stack[-1].strip() except: - return req_dict + return None # print(var_stack) item_list = [] scope_list = file_obj.ast.get_scopes(ac_line+1) @@ -490,7 +489,7 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False include_globals = True line_context, context_info = get_line_context(line_prefix) if (line_context == 'skip') or (var_prefix == '' and (not is_member)): - return req_dict + return None if self.autocomplete_no_prefix: var_prefix = '' # Suggestions for user-defined type members @@ -499,7 +498,7 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False type_scope = climb_type_tree(var_stack, curr_scope, self.obj_tree) # Set enclosing type as scope if type_scope is None: - return {"isIncomplete": False, "items": []} + return None else: include_globals = False scope_list = [type_scope] @@ -516,8 +515,7 @@ def build_comp(candidate, 
name_only=False, name_replace=None, is_interface=False if (candidate.get_type() == MODULE_TYPE_ID) and \ candidate.name.lower().startswith(var_prefix): item_list.append(build_comp(candidate, name_only=True)) - req_dict["items"] = item_list - return req_dict + return item_list elif line_context == 'mod_mems': # Public module members only (USE ONLY statement) name_only = True @@ -528,7 +526,7 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False include_globals = False type_mask[4] = False else: - return {"isIncomplete": False, "items": []} + return None elif line_context == 'call': # Callable objects only ("CALL" statements) req_callable = True @@ -568,8 +566,7 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False for candidate in get_intrinsic_keywords(self.statements, self.keywords, key_context): if candidate.name.lower().startswith(var_prefix): item_list.append(build_comp(candidate)) - req_dict["items"] = item_list - return req_dict + return item_list elif line_context == 'first': # First word -> default context plus Fortran statements for candidate in get_intrinsic_keywords(self.statements, self.keywords, 0): @@ -595,8 +592,7 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False continue # item_list.append(build_comp(candidate, name_only=name_only)) - req_dict["items"] = item_list - return req_dict + return item_list def get_definition(self, def_file, def_line, def_char): # Get full line (and possible continuations) from file @@ -661,13 +657,12 @@ def check_optional(arg, params): return i return None # Get parameters from request - req_dict = {"signatures": []} params = request["params"] uri = params["textDocument"]["uri"] path = path_from_uri(uri) file_obj = self.workspace.get(path) if file_obj is None: - return req_dict + return None # Check line sig_line = params["position"]["line"] sig_char = params["position"]["character"] @@ -675,17 +670,17 @@ def check_optional(arg, params): pre_lines, curr_line, _ = file_obj.get_code_line(sig_line, forward=False, strip_comment=True) line_prefix = get_line_prefix(pre_lines, curr_line, sig_char) if line_prefix is None: - return req_dict + return None # Test if scope declaration or end statement if SCOPE_DEF_REGEX.match(curr_line) or END_REGEX.match(curr_line): - return req_dict + return None is_member = False try: sub_name, arg_strings, sub_end = get_sub_name(line_prefix) var_stack = get_var_stack(sub_name) is_member = (len(var_stack) > 1) except: - return req_dict + return None # curr_scope = file_obj.ast.get_inner_scope(sig_line+1) # Traverse type tree if necessary @@ -719,11 +714,11 @@ def check_optional(arg, params): var_obj = candidate break if var_obj is None: - return req_dict + return None # Build signature label, doc_str, params = var_obj.get_signature() if label is None: - return req_dict + return None # Find current parameter by index or by # looking at last arg with optional name param_num = len(arg_strings)-1 diff --git a/test/test_server.py b/test/test_server.py index 1a596a6..3642290 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -191,10 +191,10 @@ def check_return(result_array): def test_comp(): def check_return(result_array, checks): - assert len(result_array["items"]) == checks[0] + assert len(result_array) == checks[0] if checks[0] > 0: - assert result_array["items"][0]["label"] == checks[1] - assert result_array["items"][0]["detail"] == checks[2] + assert result_array[0]["label"] == checks[1] + assert 
result_array[0]["detail"] == checks[2] def comp_request(file_path, line, char): return write_rpc_request(1, "textDocument/completion", { From 55e631bd9e18dbad9620d934f6d663562a3c6cd6 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Tue, 2 Apr 2019 11:38:53 -0400 Subject: [PATCH 32/44] Add support for filtering completion suggestions after "MODULE PROCEDURE" statements, fixes #103 --- fortls/langserver.py | 40 ++++++++++++++++++++++++++-------------- fortls/parse_fortran.py | 6 +++++- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/fortls/langserver.py b/fortls/langserver.py index 1e38485..0edd821 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -9,7 +9,7 @@ from fortls.objects import find_in_scope, find_in_workspace, get_use_tree, \ get_var_stack, climb_type_tree, set_keyword_ordering, MODULE_TYPE_ID, \ SUBROUTINE_TYPE_ID, FUNCTION_TYPE_ID, CLASS_TYPE_ID, INTERFACE_TYPE_ID, \ - SELECT_TYPE_ID, METH_TYPE_ID + SELECT_TYPE_ID, VAR_TYPE_ID, METH_TYPE_ID from fortls.intrinsics import get_intrinsic_keywords, load_intrinsics, \ set_lowercase_intrinsics @@ -390,7 +390,8 @@ def map_types(type): def set_type_mask(def_value): return [def_value if i < 8 else True for i in range(15)] - def get_candidates(scope_list, var_prefix, inc_globals=True, public_only=False, abstract_only=False): + def get_candidates(scope_list, var_prefix, inc_globals=True, + public_only=False, abstract_only=False, no_use=False): # def child_candidates(scope, only_list=[], filter_public=True, req_abstract=False): tmp_list = [] @@ -413,7 +414,8 @@ def child_candidates(scope, only_list=[], filter_public=True, req_abstract=False for scope in scope_list: var_list += child_candidates(scope, filter_public=public_only, req_abstract=abstract_only) # Traverse USE tree and add to list - use_dict = get_use_tree(scope, use_dict, self.obj_tree) + if not no_use: + use_dict = get_use_tree(scope, use_dict, self.obj_tree) # Look in found use modules for use_mod, only_list in use_dict.items(): scope = self.obj_tree[use_mod][0] @@ -505,9 +507,10 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False # Setup based on context req_callable = False abstract_only = False + no_use = False type_mask = set_type_mask(False) - type_mask[1] = True - type_mask[4] = True + type_mask[MODULE_TYPE_ID] = True + type_mask[CLASS_TYPE_ID] = True if line_context == 'mod_only': # Module names only (USE statement) for key in self.obj_tree: @@ -524,35 +527,43 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False scope_list = [self.obj_tree[mod_name][0]] public_only = True include_globals = False - type_mask[4] = False + type_mask[CLASS_TYPE_ID] = False else: return None + elif line_context == 'pro_link': + # Link to local subroutine/functions + type_mask = set_type_mask(True) + type_mask[SUBROUTINE_TYPE_ID] = False + type_mask[FUNCTION_TYPE_ID] = False + name_only = True + include_globals = False + no_use = True elif line_context == 'call': # Callable objects only ("CALL" statements) req_callable = True elif line_context == 'type_only': # User-defined types only (variable definitions, select clauses) type_mask = set_type_mask(True) - type_mask[4] = False + type_mask[CLASS_TYPE_ID] = False elif line_context == 'import': # Import statement (variables and user-defined types only) name_only = True type_mask = set_type_mask(True) - type_mask[4] = False - type_mask[6] = False + type_mask[CLASS_TYPE_ID] = False + type_mask[VAR_TYPE_ID] = False elif line_context == 'int_only': # Interfaces 
only (procedure definitions) abstract_only = True include_globals = False name_only = True type_mask = set_type_mask(True) - type_mask[2] = False - type_mask[3] = False + type_mask[SUBROUTINE_TYPE_ID] = False + type_mask[FUNCTION_TYPE_ID] = False elif line_context == 'var_only': # Variables only (variable definitions) name_only = True - type_mask[2] = True - type_mask[3] = True + type_mask[SUBROUTINE_TYPE_ID] = True + type_mask[FUNCTION_TYPE_ID] = True elif line_context == 'var_key': # Variable definition keywords only (variable definition) key_context = 0 @@ -573,7 +584,8 @@ def build_comp(candidate, name_only=False, name_replace=None, is_interface=False if candidate.name.lower().startswith(var_prefix): item_list.append(build_comp(candidate)) # Build completion list - for candidate in get_candidates(scope_list, var_prefix, include_globals, public_only, abstract_only): + for candidate in get_candidates(scope_list, var_prefix, include_globals, + public_only, abstract_only, no_use): # Skip module names (only valid in USE) candidate_type = candidate.get_type() if type_mask[candidate_type]: diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 4134bcc..8f6d7b3 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -95,7 +95,8 @@ INT_STMNT_REGEX = re.compile(r'^[ ]*[a-z]*$', re.I) TYPE_STMNT_REGEX = re.compile(r'[ ]*(TYPE|CLASS)[ ]*(IS)?[ ]*$', re.I) PROCEDURE_STMNT_REGEX = re.compile(r'[ ]*(PROCEDURE)[ ]*$', re.I) -SCOPE_DEF_REGEX = re.compile(r'[ ]*(MODULE|PROGRAM|SUBROUTINE|FUNCTION)[ ]+', re.I) +PRO_LINK_REGEX = re.compile(r'[ ]*(MODULE[ ]*PROCEDURE )', re.I) +SCOPE_DEF_REGEX = re.compile(r'[ ]*(MODULE|PROGRAM|SUBROUTINE|FUNCTION|INTERFACE)[ ]+', re.I) END_REGEX = re.compile(r'[ ]*(END)( |MODULE|PROGRAM|SUBROUTINE|FUNCTION|TYPE|DO|IF|SELECT)?', re.I) @@ -125,6 +126,9 @@ def get_line_context(line): return 'mod_mems', test_match[1][0] else: return 'mod_only', None + # Test for interface procedure link + if PRO_LINK_REGEX.match(line): + return 'pro_link', None # Test if scope declaration or end statement (no completion provided) if SCOPE_DEF_REGEX.match(line) or END_REGEX.match(line): return 'skip', None From 603d95aaa656374b5f2925ebbffcd5ce69266c3c Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Tue, 2 Apr 2019 12:52:34 -0400 Subject: [PATCH 33/44] Add support for filtering completion results in type-bound procedure links --- fortls/parse_fortran.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 8f6d7b3..286d0b7 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -118,6 +118,10 @@ def get_line_context(line): if test_match[0] == 'var': if (test_match[1][2] is None) and (lev1_end == len(line)): return 'var_key', None + # Procedure link? 
+ type_word = test_match[1][0] + if (type_word == 'PROCEDURE') and (line.find("=>") > 0): + return 'pro_link', None return 'var_only', None # Test if in USE statement test_match = read_use_stmt(line) @@ -314,7 +318,7 @@ def read_var_def(line, type_word=None, fun_only=False): trailing_line = trailing_line[match_char+1:] else: # Class and Type statements need a kind spec - if type_word.lower() in ('type', 'class'): + if type_word in ('TYPE', 'CLASS'): return None # Make sure next character is space or comma or colon if not trailing_line[0] in (' ', ',', ':'): From bb52e341a242b041a8fc2c08f0dca57561f3c1e5 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Tue, 2 Apr 2019 13:09:58 -0400 Subject: [PATCH 34/44] Add support for including external source file directories - Add warnings for invalid "source_dirs" and "ext_source_dirs" given in settings file --- README.rst | 6 ++++++ fortls/langserver.py | 16 +++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index b442016..4795563 100644 --- a/README.rst +++ b/README.rst @@ -151,6 +151,11 @@ the ``source_dirs`` variable in the ``.fortls`` file. When ``source_dirs`` is sp recursively, so any nested sub directories must be explicitly listed. However, ``root_dir`` does not need to be specified manually as it is always included. +External source files (ex. libraries) can also be included in language server results by specifying their paths +in the ``ext_source_dirs`` variable in the ``.fortls`` file. These files will be parsed during initialization, +but will not be updated with any changes made until the language server is restarted. As with ``source_dirs``, +specified directories are not added recursively, so any nested sub directories must be explicitly listed. + *Note:* The previous naming convention for source file directories (``mod_dirs``) is still supported but has been deprecated. @@ -175,6 +180,7 @@ test can be evaluated by the server or if the region is the *default* path (ie. 
"excl_paths": ["subdir3", "subdir1/file_to_skip.F90"], "excl_suffixes": ["_skip.f90"], "pp_defs": {"HAVE_PACKAGE": ""}, + "ext_source_dirs": ["/path/to/fortran/library"], "lowercase_intrinsics": false, "debug_log": false } diff --git a/fortls/langserver.py b/fortls/langserver.py index 0edd821..d23f5e9 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -185,6 +185,7 @@ def serve_initialize(self, request): for excl_path in config_dict.get("excl_paths", []): self.excl_paths.append(os.path.join(self.root_path, excl_path)) source_dirs = config_dict.get("source_dirs", []) + ext_source_dirs = config_dict.get("ext_source_dirs", []) # Legacy definition if len(source_dirs) == 0: source_dirs = config_dict.get("mod_dirs", []) @@ -192,6 +193,19 @@ def serve_initialize(self, request): dir_path = os.path.join(self.root_path, source_dir) if os.path.isdir(dir_path): self.source_dirs.append(dir_path) + else: + self.post_messages.append( + [2, r'Source directory "{0}" specified in ' + r'".fortls" settings file does not exist'.format(dir_path)] + ) + for ext_source_dir in ext_source_dirs: + if os.path.isdir(ext_source_dir): + self.source_dirs.append(ext_source_dir) + else: + self.post_messages.append( + [2, r'External source directory "{0}" specified in ' + r'".fortls" settings file does not exist'.format(ext_source_dir)] + ) self.excl_suffixes = config_dict.get("excl_suffixes", []) self.lowercase_intrinsics = config_dict.get("lowercase_intrinsics", self.lowercase_intrinsics) self.debug_log = config_dict.get("debug_log", self.debug_log) @@ -202,7 +216,7 @@ def serve_initialize(self, request): if isinstance(self.pp_defs, list): self.pp_defs = {key: "" for key in self.pp_defs} except: - self.post_messages.append([1, "Error while parsing '.fortls' settings file"]) + self.post_messages.append([1, 'Error while parsing ".fortls" settings file']) # Setup logging if self.debug_log and (self.root_path != ""): logging.basicConfig(filename=os.path.join(self.root_path, "fortls_debug.log"), From a2eb3a83c2adc6c31de09d7fed95cd28a57ee7a3 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Tue, 2 Apr 2019 13:12:32 -0400 Subject: [PATCH 35/44] Update README with updates to command-line options --- README.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.rst b/README.rst index 4795563..b69ce1f 100644 --- a/README.rst +++ b/README.rst @@ -104,6 +104,8 @@ The following global settings can be used when launching the language server. 
* ``--variable_hover`` Show hover information for variables (default: subroutines/functions only) * ``--preserve_keyword_order`` Display variable keywords information in original order (default: sort to consistent ordering) * ``--enable_code_actions`` Enable experimental code actions (default: false) +* ``--max_line_length`` Maximum line length (default: disabled) +* ``--max_comment_line_length`` Maximum comment line length (default: disabled) * ``--debug_log`` Write debug information to ``root_dir/fortls_debug.log`` (requires a specified ``root_dir`` during initialization) **Debug settings:** From 053984a054b249a379109b606c7d36324a7f3da7 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Wed, 3 Apr 2019 09:22:32 -0400 Subject: [PATCH 36/44] Remove redundant/double link resolution in subroutine/functions --- fortls/objects.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index 99c0559..0984acd 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -789,17 +789,17 @@ def get_children(self, public_only=False): return tmp_list def resolve_arg_link(self, obj_tree): - if self.args == '': + if (self.args == '') or (len(self.in_children) > 0): return arg_list = self.args.replace(' ', '').split(',') arg_list_lower = self.args.lower().replace(' ', '').split(',') self.arg_objs = [None for arg in arg_list] - check_objs = copy.copy(self.children) - for child in self.children: - if child.is_external_int(): - check_objs += child.get_children() + # check_objs = copy.copy(self.children) + # for child in self.children: + # if child.is_external_int(): + # check_objs += child.get_children() self.missing_args = [] - for child in check_objs: + for child in self.children: ind = -1 for (i, arg) in enumerate(arg_list_lower): if arg == child.name.lower(): @@ -812,7 +812,6 @@ def resolve_arg_link(self, obj_tree): self.arg_objs[ind] = child if child.is_optional(): arg_list[ind] = "{0}={0}".format(arg_list[ind]) - child.resolve_link(obj_tree) self.args_snip = ",".join(arg_list) def resolve_link(self, obj_tree): @@ -1276,6 +1275,10 @@ def resolve_link(self, obj_tree): var_obj = find_in_scope(type_scope, var_stack[-1], obj_tree) if var_obj is not None: assoc_link[0].link_obj = var_obj + else: + var_obj = find_in_scope(self, assoc_link[2], obj_tree) + if var_obj is not None: + assoc_link[0].link_obj = var_obj class fortran_enum(fortran_block): From 87806e2beff0f4439ac8aefdff3e47a7dbaef69d Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 4 Apr 2019 10:52:22 -0400 Subject: [PATCH 37/44] Speedup traversal of user-defined type sequences, ref #101 --- fortls/objects.py | 63 ++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index 0984acd..cbaff6b 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -125,7 +125,7 @@ def get_use_tree(scope, use_dict, obj_tree, only_list=[]): return use_dict -def find_in_scope(scope, var_name, obj_tree, interface=False): +def find_in_scope(scope, var_name, obj_tree, interface=False, local_only=False): def check_scope(local_scope, var_name_lower, filter_public=False): for child in local_scope.get_children(): if child.name.startswith("#GEN_INT"): @@ -142,7 +142,7 @@ def check_scope(local_scope, var_name_lower, filter_public=False): var_name_lower = var_name.lower() # Check local scope tmp_var = check_scope(scope, var_name_lower) - if tmp_var is not None: + if local_only or (tmp_var is not None): return tmp_var # 
Setup USE search use_dict = get_use_tree(scope, {}, obj_tree) @@ -283,52 +283,32 @@ def get_var_stack(line): def climb_type_tree(var_stack, curr_scope, obj_tree): """Walk up user-defined type sequence to determine final field type""" - def get_type_name(var_obj): - type_desc = get_paren_substring(var_obj.get_desc()) - if type_desc is not None: - type_desc = type_desc.strip().lower() - return type_desc # Find base variable in current scope - type_name = None - type_scope = None iVar = 0 var_name = var_stack[iVar].strip().lower() var_obj = find_in_scope(curr_scope, var_name, obj_tree) if var_obj is None: return None - else: - type_name = get_type_name(var_obj) - curr_scope = var_obj.parent # Search for type, then next variable in stack and so on for _ in range(30): - # Find variable type in available scopes - if type_name is None: - break - type_scope = find_in_scope(curr_scope, type_name, obj_tree) - # Exit if not found - if type_scope is None: - break - curr_scope = type_scope.parent + # Find variable type object + type_obj = var_obj.get_type_obj(obj_tree) + # Return if not found + if type_obj is None: + return None # Go to next variable in stack and exit if done iVar += 1 if iVar == len(var_stack)-1: break - # Find next variable by name in scope + # Find next variable by name in type var_name = var_stack[iVar].strip().lower() - var_obj = find_in_scope(type_scope, var_name, obj_tree) - # Set scope to declaration location if variable is inherited - if var_obj is not None: - curr_scope = var_obj.parent - if (var_obj.parent is not None) and (var_obj.parent.get_type() == CLASS_TYPE_ID): - for in_child in var_obj.parent.in_children: - if (in_child.name.lower() == var_name) and (in_child.parent is not None): - curr_scope = in_child.parent - type_name = get_type_name(var_obj) - else: - break + var_obj = find_in_scope(type_obj, var_name, obj_tree, local_only=True) + # Return if not found + if var_obj is None: + return None else: raise KeyError - return type_scope + return type_obj class fortran_diagnostic: @@ -414,6 +394,9 @@ def resolve_link(self, obj_tree): def get_type(self, no_link=False): return -1 + def get_type_obj(self, obj_tree): + return None + def get_desc(self): return 'unknown' @@ -1401,6 +1384,7 @@ def base_setup(self, file_ast, line_number, name, var_desc, keywords, self.vis = 0 self.parent = None self.link_obj = None + self.type_obj = None if link_obj is not None: self.link_name = link_obj.lower() else: @@ -1423,6 +1407,7 @@ def update_fqsn(self, enc_scope=None): child.update_fqsn(self.FQSN) def resolve_link(self, obj_tree): + self.link_obj = None if self.link_name is None: return if self.parent is not None: @@ -1442,6 +1427,18 @@ def get_desc(self): # Normal variable return self.desc + def get_type_obj(self, obj_tree): + if self.link_obj is not None: + return self.link_obj.get_type_obj(obj_tree) + if (self.type_obj is None) and (self.parent is not None): + type_name = get_paren_substring(self.desc) + if type_name is not None: + type_name = type_name.strip().lower() + type_obj = find_in_scope(self.parent, type_name, obj_tree) + if type_obj is not None: + self.type_obj = type_obj + return self.type_obj + def set_dim(self, dim_str): if KEYWORD_ID_DICT['dimension'] not in self.keywords: self.keywords.append(KEYWORD_ID_DICT['dimension']) From 65172822caa3daede31fa5d25e038fa1049e10c2 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 4 Apr 2019 11:18:25 -0400 Subject: [PATCH 38/44] Accelerate object cross-linking by flattening recursive link stage, ref #101 --- fortls/langserver.py 
| 18 ++++++-------- fortls/objects.py | 58 +++++++++++++++++++++++++++++++------------- 2 files changed, 48 insertions(+), 28 deletions(-) diff --git a/fortls/langserver.py b/fortls/langserver.py index d23f5e9..6317ccd 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -1131,10 +1131,9 @@ def serve_onSave(self, request, test_exist=False): file_obj.ast.resolve_includes(self.workspace, path=filepath) file_obj = self.workspace.get(filepath) file_obj.ast.resolve_includes(self.workspace) - # Update inheritance - for key in self.obj_tree: - self.obj_tree[key][0].resolve_inherit(self.obj_tree) - self.obj_tree[key][0].resolve_link(self.obj_tree) + # Update inheritance/links + for _, file_obj in self.workspace.items(): + file_obj.ast.resolve_links(self.obj_tree) self.send_diagnostics(uri) def add_file(self, filepath): @@ -1170,9 +1169,7 @@ def update_workspace_file(self, filepath, read_file=False, update_links=False): self.obj_tree[key] = [obj, filepath] # Update local links/inheritance if necessary if update_links: - for key, obj in ast_new.global_dict.items(): - obj.resolve_inherit(self.obj_tree) - obj.resolve_link(self.obj_tree) + ast_new.resolve_links(self.obj_tree) return None def workspace_init(self): @@ -1213,10 +1210,9 @@ def workspace_init(self): # Update include statements for _, file_obj in self.workspace.items(): file_obj.ast.resolve_includes(self.workspace) - # Update inheritance - for key in self.obj_tree: - self.obj_tree[key][0].resolve_inherit(self.obj_tree) - self.obj_tree[key][0].resolve_link(self.obj_tree) + # Update inheritance/links + for _, file_obj in self.workspace.items(): + file_obj.ast.resolve_links(self.obj_tree) def serve_exit(self, request): # Exit server diff --git a/fortls/objects.py b/fortls/objects.py index cbaff6b..3e743ad 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -388,9 +388,15 @@ def end(self, line_number): def resolve_inherit(self, obj_tree): return None + def require_inherit(self): + return False + def resolve_link(self, obj_tree): return None + def require_link(self): + return False + def get_type(self, no_link=False): return -1 @@ -496,14 +502,6 @@ def add_use(self, use_mod, line_number, only_list=[]): def set_inherit(self, inherit_type): self.inherit = inherit_type - def resolve_inherit(self, obj_tree): - for child in self.children: - child.resolve_inherit(obj_tree) - - def resolve_link(self, obj_tree): - for child in self.children: - child.resolve_link(obj_tree) - def set_parent(self, parent_obj): self.parent = parent_obj @@ -708,6 +706,9 @@ def resolve_inherit(self, obj_tree): if self.ancestor_name in obj_tree: self.ancestor_obj = obj_tree[self.ancestor_name][0] + def require_inherit(self): + return True + def resolve_link(self, obj_tree): # Link subroutine/function implementations to prototypes if self.ancestor_obj is None: @@ -728,9 +729,9 @@ def resolve_link(self, obj_tree): prototype.resolve_link(obj_tree) child.copy_interface(prototype) break - # Recurse into children - for child in self.children: - child.resolve_link(obj_tree) + + def require_link(self): + return True class fortran_subroutine(fortran_scope): @@ -799,8 +800,9 @@ def resolve_arg_link(self, obj_tree): def resolve_link(self, obj_tree): self.resolve_arg_link(obj_tree) - for child in self.children: - child.resolve_link(obj_tree) + + def require_link(self): + return True def get_type(self, no_link=False): return SUBROUTINE_TYPE_ID @@ -970,8 +972,6 @@ def resolve_link(self, obj_tree): for child in self.children: if child.name.lower() == result_var_lower: 
self.result_obj = child - for child in self.children: - child.resolve_link(obj_tree) def get_type(self, no_link=False): return FUNCTION_TYPE_ID @@ -1088,6 +1088,9 @@ def resolve_inherit(self, obj_tree): if child.name.lower() not in child_names: self.in_children.append(child) + def require_inherit(self): + return True + def get_overriden(self, field_name): ret_list = [] field_name = field_name.lower() @@ -1263,6 +1266,9 @@ def resolve_link(self, obj_tree): if var_obj is not None: assoc_link[0].link_obj = var_obj + def require_link(self): + return True + class fortran_enum(fortran_block): def __init__(self, file_ast, line_number, name): @@ -1358,8 +1364,9 @@ def resolve_link(self, obj_tree): mem_obj = find_in_scope(self.parent, member, obj_tree) if mem_obj is not None: self.mems.append(mem_obj) - for child in self.children: - child.resolve_link(obj_tree) + + def require_link(self): + return True class fortran_var(fortran_obj): @@ -1415,6 +1422,9 @@ def resolve_link(self, obj_tree): if link_obj is not None: self.link_obj = link_obj + def require_link(self): + return (self.link_name is not None) + def get_type(self, no_link=False): if (not no_link) and (self.link_obj is not None): return self.link_obj.get_type() @@ -1639,6 +1649,8 @@ def __init__(self, file_obj=None): self.include_stmnts = [] self.end_errors = [] self.parse_errors = [] + self.inherit_objs = [] + self.linkable_objs = [] self.none_scope = None self.inc_scope = None self.current_scope = None @@ -1662,6 +1674,10 @@ def get_enc_scope_name(self): def add_scope(self, new_scope, END_SCOPE_WORD, exportable=True, req_container=False): self.scope_list.append(new_scope) + if new_scope.require_inherit(): + self.inherit_objs.append(new_scope) + if new_scope.require_link(): + self.linkable_objs.append(new_scope) if self.current_scope is None: if req_container: self.create_none_scope() @@ -1705,6 +1721,8 @@ def add_variable(self, new_var): new_var.FQSN = self.none_scope.FQSN + "::" + new_var.name.lower() self.current_scope.add_child(new_var) self.variable_list.append(new_var) + if new_var.require_link(): + self.linkable_objs.append(new_var) self.last_obj = new_var if self.pending_doc is not None: self.last_obj.add_doc(self.pending_doc) @@ -1824,6 +1842,12 @@ def resolve_includes(self, workspace, path=None): include_ast.none_scope = parent_scope include_path[2] = added_entities + def resolve_links(self, obj_tree): + for inherit_obj in self.inherit_objs: + inherit_obj.resolve_inherit(obj_tree) + for linkable_obj in self.linkable_objs: + linkable_obj.resolve_link(obj_tree) + def close_file(self, line_number): # Close open scopes while self.current_scope is not None: From 541d415d8e7479d502ae27ec9f7aa87d36a152f7 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 4 Apr 2019 16:01:31 -0400 Subject: [PATCH 39/44] Avoid unnecessary "re-resolution" of type inheritance during link phase, ref #101 --- fortls/langserver.py | 10 +++++++--- fortls/objects.py | 25 ++++++++++++------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/fortls/langserver.py b/fortls/langserver.py index 6317ccd..2eaf059 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -64,6 +64,7 @@ def __init__(self, conn, debug_log=False, settings={}): self.all_symbols = None self.workspace = {} self.obj_tree = {} + self.link_version = 0 self.source_dirs = [] self.excl_paths = [] self.excl_suffixes = [] @@ -1132,8 +1133,9 @@ def serve_onSave(self, request, test_exist=False): file_obj = self.workspace.get(filepath) 
file_obj.ast.resolve_includes(self.workspace) # Update inheritance/links + self.link_version = (self.link_version + 1) % 1000 for _, file_obj in self.workspace.items(): - file_obj.ast.resolve_links(self.obj_tree) + file_obj.ast.resolve_links(self.obj_tree, self.link_version) self.send_diagnostics(uri) def add_file(self, filepath): @@ -1169,7 +1171,8 @@ def update_workspace_file(self, filepath, read_file=False, update_links=False): self.obj_tree[key] = [obj, filepath] # Update local links/inheritance if necessary if update_links: - ast_new.resolve_links(self.obj_tree) + self.link_version = (self.link_version + 1) % 1000 + ast_new.resolve_links(self.obj_tree, self.link_version) return None def workspace_init(self): @@ -1211,8 +1214,9 @@ def workspace_init(self): for _, file_obj in self.workspace.items(): file_obj.ast.resolve_includes(self.workspace) # Update inheritance/links + self.link_version = (self.link_version + 1) % 1000 for _, file_obj in self.workspace.items(): - file_obj.ast.resolve_links(self.obj_tree) + file_obj.ast.resolve_links(self.obj_tree, self.link_version) def serve_exit(self, request): # Exit server diff --git a/fortls/objects.py b/fortls/objects.py index 3e743ad..7dc83aa 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -385,7 +385,7 @@ def update_fqsn(self, enc_scope=None): def end(self, line_number): self.eline = line_number - def resolve_inherit(self, obj_tree): + def resolve_inherit(self, obj_tree, inherit_version): return None def require_inherit(self): @@ -700,7 +700,7 @@ def get_ancestors(self): return [self.ancestor_obj] return [] - def resolve_inherit(self, obj_tree): + def resolve_inherit(self, obj_tree, inherit_version): if self.ancestor_name is None: return if self.ancestor_name in obj_tree: @@ -1048,6 +1048,7 @@ def __init__(self, file_ast, line_number, name, keywords): self.inherit = None self.inherit_var = None self.inherit_tmp = None + self.inherit_version = -1 if self.keywords.count(KEYWORD_ID_DICT['public']) > 0: self.vis = 1 if self.keywords.count(KEYWORD_ID_DICT['private']) > 0: @@ -1064,24 +1065,22 @@ def get_children(self, public_only=False): tmp_list.extend(self.in_children) return tmp_list - def resolve_inherit(self, obj_tree): - if self.inherit is None: + def resolve_inherit(self, obj_tree, inherit_version): + if (self.inherit is None) or (self.inherit_version == inherit_version): return - # + self.inherit_version = inherit_version self.inherit_var = find_in_scope(self.parent, self.inherit, obj_tree) if self.inherit_var is not None: - # Disable "resolve_inherit" to allow circular type references + # Resolve parent inheritance while avoiding circular recursion self.inherit_tmp = self.inherit self.inherit = None - self.inherit_var.resolve_inherit(obj_tree) + self.inherit_var.resolve_inherit(obj_tree, inherit_version) + self.inherit = self.inherit_tmp + self.inherit_tmp = None # Get current fields child_names = [] for child in self.children: child_names.append(child.name.lower()) - child.resolve_inherit(obj_tree) - # Re-enable "resolve_inherit" to allow circular type references - self.inherit = self.inherit_tmp - self.inherit_tmp = None # Import for parent objects self.in_children = [] for child in self.inherit_var.get_children(): @@ -1842,9 +1841,9 @@ def resolve_includes(self, workspace, path=None): include_ast.none_scope = parent_scope include_path[2] = added_entities - def resolve_links(self, obj_tree): + def resolve_links(self, obj_tree, link_version): for inherit_obj in self.inherit_objs: - inherit_obj.resolve_inherit(obj_tree) + 
inherit_obj.resolve_inherit(obj_tree, inherit_version=link_version) for linkable_obj in self.linkable_objs: linkable_obj.resolve_link(obj_tree) From 25c92c706b10d5a66ff261afed593c1c7247d544 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 5 Apr 2019 13:56:25 -0400 Subject: [PATCH 40/44] Add VSCode folder to ignored paths for Git --- .gitignore | 1 + fortls/langserver.py | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 0d20b64..6f44278 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ *.pyc +.vscode \ No newline at end of file diff --git a/fortls/langserver.py b/fortls/langserver.py index 2eaf059..9b3f8e6 100644 --- a/fortls/langserver.py +++ b/fortls/langserver.py @@ -1108,10 +1108,8 @@ def serve_onChange(self, request): for _, tmp_file in self.workspace.items(): tmp_file.ast.resolve_includes(self.workspace, path=path) file_obj.ast.resolve_includes(self.workspace) - # Update inheritance (currently only on open/save) - # for key in self.obj_tree: - # self.obj_tree[key][0].resolve_inherit(self.obj_tree) - # self.obj_tree[key][0].resolve_link(self.obj_tree) + # Update inheritance (currently file only) + # tmp_file.ast.resolve_links(self.obj_tree, self.link_version) def serve_onClose(self, request): self.serve_onSave(request, test_exist=True) From 2f04a2e89a3162c7273f5c69e5dfeda6065a7db6 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 5 Apr 2019 14:06:12 -0400 Subject: [PATCH 41/44] Release version 1.8.0 --- CHANGELOG.md | 11 +++++++++++ fortls/__init__.py | 2 +- setup.py | 4 ++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3801493..66b73f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +## 1.8.0 + +### Improvements +* Add full support for ASSOCIATE statements, fixes [#101](https://github.com/hansec/fortran-language-server/issues/101) +* Filter completion suggestions after "MODULE PROCEDURE" statements, fixes [#103](https://github.com/hansec/fortran-language-server/issues/103) +* Filter completion suggestions in type-bound procedure links +* Add support for including external source file directories +* Diagnostics: Line length exceeds maximum length errors +* Speedup language server initialization +* Speedup "textDocument/references" requests + ## 1.7.3 ### Fixes diff --git a/fortls/__init__.py b/fortls/__init__.py index b782663..e951478 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -6,7 +6,7 @@ from .langserver import LangServer from .jsonrpc import JSONRPC2Connection, ReadWriter, path_from_uri from .parse_fortran import fortran_file, process_file -__version__ = '1.7.3' +__version__ = '1.8.0' def error_exit(error_str): diff --git a/setup.py b/setup.py index 3aaf34b..20f5630 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version='1.7.3', + version='1.8.0', description='FORTRAN Language Server for the Language Server Protocol', @@ -17,7 +17,7 @@ # The project's main homepage. 
url='https://github.com/hansec/fortran-language-server', - download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.7.3.tar.gz', + download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.8.0.tar.gz', author='Chris Hansen', author_email = 'hansec@uw.edu', From 4ab414a047298f9e31e782439281c3e728768a2c Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Fri, 5 Apr 2019 14:35:14 -0400 Subject: [PATCH 42/44] Maintain line length during tab character replacement, fixes #93 --- fortls/parse_fortran.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fortls/parse_fortran.py b/fortls/parse_fortran.py index 286d0b7..c8f0961 100644 --- a/fortls/parse_fortran.py +++ b/fortls/parse_fortran.py @@ -699,11 +699,11 @@ def load_from_disk(self): try: if PY3K: with open(self.path, 'r', encoding='utf-8', errors='replace') as fhandle: - contents = re.sub(r'\t', r' ', fhandle.read()) + contents = re.sub(r'\t', r' ', fhandle.read()) self.contents_split = contents.splitlines() else: with io.open(self.path, 'r', encoding='utf-8', errors='replace') as fhandle: - contents = re.sub(r'\t', r' ', fhandle.read()) + contents = re.sub(r'\t', r' ', fhandle.read()) self.contents_split = contents.splitlines() self.fixed = detect_fixed_format(self.contents_split) self.contents_pp = self.contents_split From fae86758ee4cddee3337ac5c256bf3c94ba92291 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Wed, 10 Apr 2019 08:20:16 -0400 Subject: [PATCH 43/44] Fix bug with requests following "WRITE(*,*)" statements --- fortls/objects.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/fortls/objects.py b/fortls/objects.py index 7dc83aa..d227086 100644 --- a/fortls/objects.py +++ b/fortls/objects.py @@ -258,6 +258,7 @@ def get_var_stack(line): Examples: "myvar%foo%bar" -> ["myvar", "foo", "bar"] + "myarray(i)%foo%bar" -> ["myarray", "foo", "bar"] "CALL self%method(this%foo" -> ["this", "foo"] """ if len(line) == 0: @@ -265,14 +266,14 @@ def get_var_stack(line): final_var, sections = get_paren_level(line) if final_var == '': return [''] - if final_var.find('%') < 0: - final_paren = sections[-1] - ntail = final_paren[1] - final_paren[0] - # - if ntail == 0: - final_var = '' - elif ntail > 0: - final_var = final_var[len(final_var)-ntail:] + # Continuation of variable after paren requires '%' character + iLast = 0 + for (i, section) in enumerate(sections): + if (not line[section[0]:section[1]].startswith('%')): + iLast = i + final_var = '' + for section in sections[iLast:]: + final_var += line[section[0]:section[1]] # if final_var is not None: final_op_split = OBJBREAK_REGEX.split(final_var) From c92fdf0b512b75c90588eabc30a58081b34888f7 Mon Sep 17 00:00:00 2001 From: Chris Hansen Date: Thu, 11 Apr 2019 11:06:06 -0400 Subject: [PATCH 44/44] Release version 1.8.1 --- CHANGELOG.md | 6 ++++++ fortls/__init__.py | 2 +- setup.py | 4 ++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66b73f2..b080ae8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## 1.8.1 + +### Fixes +* Fix bug with requests in lines with tab characters, fixes [#93](https://github.com/hansec/fortran-language-server/issues/93) +* Fix bug with requests following "WRITE(*,*)" statements + ## 1.8.0 ### Improvements diff --git a/fortls/__init__.py b/fortls/__init__.py index e951478..cf2687d 100644 --- a/fortls/__init__.py +++ b/fortls/__init__.py @@ -6,7 +6,7 @@ from .langserver import LangServer from .jsonrpc import 
JSONRPC2Connection, ReadWriter, path_from_uri from .parse_fortran import fortran_file, process_file -__version__ = '1.8.0' +__version__ = '1.8.1' def error_exit(error_str): diff --git a/setup.py b/setup.py index 20f5630..603dc0a 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version='1.8.0', + version='1.8.1', description='FORTRAN Language Server for the Language Server Protocol', @@ -17,7 +17,7 @@ # The project's main homepage. url='https://github.com/hansec/fortran-language-server', - download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.8.0.tar.gz', + download_url = 'https://github.com/hansec/fortran-language-server/archive/v1.8.1.tar.gz', author='Chris Hansen', author_email = 'hansec@uw.edu',
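
The link-phase change above ("Avoid unnecessary \"re-resolution\" of type inheritance during link phase", ref #101) works by stamping each object with the version number of the resolve pass that last visited it: serve_onSave, update_workspace_file, and workspace_init bump self.link_version before walking the workspace, and fortran_type.resolve_inherit returns early when its stored inherit_version already matches. A shared ancestor type that is reachable from many derived types is therefore resolved only once per pass. The sketch below is a minimal, self-contained illustration of that guard only; the names TypeNode and resolve_all are hypothetical and do not appear in fortls.

# Minimal sketch of a version-stamped resolve pass (illustrative names only).
class TypeNode:
    def __init__(self, name, parent_name=None):
        self.name = name
        self.parent_name = parent_name
        self.parent = None
        self.resolved_version = -1   # version of the last pass that resolved this node
        self.resolve_count = 0       # counts real resolutions, for demonstration

    def resolve_inherit(self, tree, version):
        # Skip nodes with no ancestor and nodes already handled in this pass;
        # the version check is what prevents repeated re-resolution.
        if self.parent_name is None or self.resolved_version == version:
            return
        self.resolved_version = version
        self.resolve_count += 1
        self.parent = tree.get(self.parent_name)
        if self.parent is not None:
            self.parent.resolve_inherit(tree, version)


def resolve_all(tree, version):
    # Analogous to looping over every file's AST with a shared link_version:
    # nodes reachable from several roots are still resolved once per pass.
    for node in tree.values():
        node.resolve_inherit(tree, version)


if __name__ == "__main__":
    tree = {
        "base": TypeNode("base"),
        "mid": TypeNode("mid", "base"),
        "leaf_a": TypeNode("leaf_a", "mid"),
        "leaf_b": TypeNode("leaf_b", "mid"),
    }
    resolve_all(tree, version=1)   # first pass: each node resolved once
    resolve_all(tree, version=1)   # same version: all work is skipped
    resolve_all(tree, version=2)   # bumped version: resolved again
    print({name: node.resolve_count for name, node in tree.items()})
    # -> {'base': 0, 'mid': 2, 'leaf_a': 2, 'leaf_b': 2}

Bumping the version (as the patched serve_onSave does with self.link_version) invalidates every stamp at once, which is cheaper than clearing a per-object cache across the whole workspace before each pass.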