diff --git a/.gitmodules b/.gitmodules index 21e7c603..31a788ee 100644 --- a/.gitmodules +++ b/.gitmodules @@ -340,9 +340,6 @@ [submodule "vendor/grammars/latex.tmbundle"] path = vendor/grammars/latex.tmbundle url = https://github.com/textmate/latex.tmbundle -[submodule "vendor/grammars/less.tmbundle"] - path = vendor/grammars/less.tmbundle - url = https://github.com/textmate/less.tmbundle [submodule "vendor/grammars/lilypond.tmbundle"] path = vendor/grammars/lilypond.tmbundle url = https://github.com/textmate/lilypond.tmbundle @@ -676,7 +673,7 @@ url = https://github.com/CausalityLtd/sublime-pony [submodule "vendor/grammars/X10"] path = vendor/grammars/X10 - url = git@github.com:x10-lang/x10-highlighting.git + url = https://github.com/x10-lang/x10-highlighting [submodule "vendor/grammars/language-babel"] path = vendor/grammars/language-babel url = https://github.com/gandm/language-babel @@ -691,7 +688,7 @@ url = https://github.com/freemarker/FreeMarker.tmbundle [submodule "vendor/grammars/MagicPython"] path = vendor/grammars/MagicPython - url = git@github.com:MagicStack/MagicPython.git + url = https://github.com/MagicStack/MagicPython [submodule "vendor/grammars/language-click"] path = vendor/grammars/language-click url = https://github.com/stenverbois/language-click.git @@ -706,7 +703,16 @@ url = https://github.com/erkyrath/language-inform7 [submodule "vendor/grammars/atom-language-stan"] path = vendor/grammars/atom-language-stan - url = git@github.com:jrnold/atom-language-stan.git + url = https://github.com/jrnold/atom-language-stan +[submodule "vendor/grammars/language-yang"] + path = vendor/grammars/language-yang + url = https://github.com/DzonyKalafut/language-yang.git +[submodule "vendor/grammars/perl6fe"] + path = vendor/grammars/perl6fe + url = https://github.com/MadcapJake/language-perl6fe.git +[submodule "vendor/grammars/language-less"] + path = vendor/grammars/language-less + url = https://github.com/atom/language-less.git [submodule 
"vendor/grammars/language-povray"] path = vendor/grammars/language-povray url = https://github.com/c-lipka/language-povray diff --git a/README.md b/README.md index 36bcbd50..2d661ef9 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,9 @@ Alternatively, you can use Vim or Emacs style modelines to set the language for ##### Vim ``` +# Some examples of various styles: +vim: syntax=java +vim: set syntax=ruby: vim: set filetype=prolog: vim: set ft=cpp: ``` diff --git a/grammars.yml b/grammars.yml index a8882b15..706cdaa0 100755 --- a/grammars.yml +++ b/grammars.yml @@ -354,6 +354,8 @@ vendor/grammars/language-javascript: vendor/grammars/language-jsoniq/: - source.jq - source.xq +vendor/grammars/language-less/: +- source.css.less vendor/grammars/language-maxscript: - source.maxscript vendor/grammars/language-ncl: @@ -374,6 +376,8 @@ vendor/grammars/language-xbase: - source.harbour vendor/grammars/language-yaml: - source.yaml +vendor/grammars/language-yang/: +- source.yang vendor/grammars/latex.tmbundle: - text.bibtex - text.log.latex @@ -381,8 +385,6 @@ vendor/grammars/latex.tmbundle: - text.tex.latex - text.tex.latex.beamer - text.tex.latex.memoir -vendor/grammars/less.tmbundle: -- source.css.less vendor/grammars/lilypond.tmbundle: - source.lilypond vendor/grammars/liquid.tmbundle: @@ -445,6 +447,10 @@ vendor/grammars/pascal.tmbundle: vendor/grammars/perl.tmbundle/: - source.perl - source.perl.6 +vendor/grammars/perl6fe: +- source.meta-info +- source.perl6fe +- source.regexp.perl6fe vendor/grammars/php-smarty.tmbundle: - text.html.smarty vendor/grammars/php.tmbundle: diff --git a/lib/linguist/generated.rb b/lib/linguist/generated.rb index 35766e4d..7747406f 100644 --- a/lib/linguist/generated.rb +++ b/lib/linguist/generated.rb @@ -72,7 +72,9 @@ module Linguist vcr_cassette? || generated_module? || generated_unity3d_meta? || - generated_racc? + generated_racc? || + generated_jflex? || + generated_grammarkit? end # Internal: Is the blob an Xcode file? 
@@ -373,5 +375,32 @@ module Linguist return false unless lines.count > 2 return lines[2].start_with?("# This file is automatically generated by Racc") end + + # Internal: Is this a JFlex-generated file? + # + # A JFlex-generated file contains: + # /* The following code was generated by JFlex x.y.z on d/at/e ti:me */ + # on the first line. + # + # Return true or false + def generated_jflex? + return false unless extname == '.java' + return false unless lines.count > 1 + return lines[0].start_with?("/* The following code was generated by JFlex ") + end + + # Internal: Is this a GrammarKit-generated file? + # + # A GrammarKit-generated file typically contains: + # // This is a generated file. Not intended for manual editing. + # on the first line. This is not always the case, as it's possible to + # customize the class header. + # + # Return true or false + def generated_grammarkit? + return false unless extname == '.java' + return false unless lines.count > 1 + return lines[0].start_with?("// This is a generated file. 
Not intended for manual editing.") + end end end diff --git a/lib/linguist/heuristics.rb b/lib/linguist/heuristics.rb index 01be4e33..0806ce1f 100644 --- a/lib/linguist/heuristics.rb +++ b/lib/linguist/heuristics.rb @@ -293,6 +293,14 @@ module Linguist end end + disambiguate ".pod" do |data| + if /^=\w+$/.match(data) + Language["Pod"] + else + Language["Perl"] + end + end + disambiguate ".pro" do |data| if /^[^#]+:-/.match(data) Language["Prolog"] diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 383cff72..5a8ae4e8 100755 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -183,6 +183,7 @@ AppleScript: interpreters: - osascript ace_mode: applescript + color: "#101F1F" Arc: type: programming @@ -290,6 +291,7 @@ Batchfile: - .cmd tm_scope: source.dosbatch ace_mode: batchfile + color: "#C1F12E" Befunge: type: programming @@ -304,6 +306,7 @@ Bison: extensions: - .bison ace_mode: text + color: "#6A463F" BitBake: type: programming @@ -638,7 +641,7 @@ Common Lisp: Component Pascal: type: programming - color: "#b0ce4e" + color: "#B0CE4E" extensions: - .cp - .cps @@ -701,6 +704,7 @@ Cucumber: aliases: - gherkin ace_mode: text + color: "#5B2063" Cuda: type: programming @@ -709,6 +713,7 @@ Cuda: - .cuh tm_scope: source.cuda-c++ ace_mode: c_cpp + color: "#3A4E3A" Cycript: type: programming @@ -801,7 +806,6 @@ Dart: Diff: type: data - color: "#88dddd" extensions: - .diff - .patch @@ -939,6 +943,8 @@ Erlang: - .es - .escript - .hrl + - .xrl + - .yrl filenames: - rebar.config - rebar.config.lock @@ -1390,6 +1396,7 @@ Hack: - .hh - .php tm_scope: text.html.php + color: "#878787" Haml: group: HTML @@ -1398,6 +1405,7 @@ Haml: - .haml - .haml.deface ace_mode: haml + color: "#ECE2A9" Handlebars: type: markup @@ -1764,6 +1772,7 @@ LLVM: extensions: - .ll ace_mode: text + color: "#185619" LOLCODE: type: programming @@ -1827,6 +1836,7 @@ Less: - .less tm_scope: source.css.less ace_mode: less + color: "#A1D9A1" Lex: type: programming @@ -2019,6 
+2029,8 @@ Makefile: - GNUmakefile - Kbuild - Makefile + - Makefile.am + - Makefile.in - Makefile.inc - makefile interpreters: @@ -2045,6 +2057,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm + color: "#083FA1" Mask: type: markup @@ -2284,6 +2297,7 @@ Nginx: aliases: - nginx configuration file ace_mode: text + color: "#9469E9" Nimrod: type: programming @@ -2342,6 +2356,7 @@ NumPy: - .numsc tm_scope: none ace_mode: text + color: "#9C8AF9" OCaml: type: programming @@ -2602,7 +2617,7 @@ Parrot Internal Representation: Pascal: type: programming - color: "#b0ce4e" + color: "#E3F171" extensions: - .pas - .dfm @@ -2651,7 +2666,7 @@ Perl6: - Rexfile interpreters: - perl6 - tm_scope: source.perl.6 + tm_scope: source.perl6fe ace_mode: perl Pickle: @@ -2746,8 +2761,10 @@ Prolog: - .pl - .pro - .prolog + - .yap interpreters: - swipl + - yap tm_scope: source.prolog ace_mode: prolog @@ -2817,6 +2834,7 @@ Python: color: "#3572A5" extensions: - .py + - .bzl - .cgi - .fcgi - .gyp @@ -2870,7 +2888,7 @@ QMake: R: type: programming - color: "#198ce7" + color: "#198CE7" aliases: - R - Rscript @@ -2900,6 +2918,7 @@ RDoc: extensions: - .rdoc tm_scope: text.rdoc + color: "#8E84BF" REALbasic: type: programming @@ -3091,6 +3110,7 @@ Rust: color: "#dea584" extensions: - .rs + - .rs.in ace_mode: rust SAS: @@ -3108,6 +3128,7 @@ SCSS: ace_mode: scss extensions: - .scss + color: "#CF649A" SMT: type: programming @@ -3210,6 +3231,7 @@ Sass: extensions: - .sass ace_mode: sass + color: "#CF649A" Scala: type: programming @@ -3267,6 +3289,7 @@ Shell: color: "#89e051" aliases: - sh + - shell-script - bash - zsh extensions: @@ -3277,6 +3300,7 @@ Shell: - .command - .fcgi - .ksh + - .sh.in - .tmux - .tool - .zsh @@ -3575,7 +3599,6 @@ Unified Parallel C: Unity3D Asset: type: data ace_mode: yaml - color: "#ab69a1" extensions: - .anim - .asset @@ -3585,6 +3608,13 @@ Unity3D Asset: - .unity tm_scope: source.yaml +Uno: + type: programming + extensions: + - .uno + ace_mode: csharp + tm_scope: source.cs + 
UnrealScript: type: programming color: "#a54c4d" @@ -3791,6 +3821,7 @@ XML: - .tsx - .ui - .urdf + - .ux - .vbproj - .vcxproj - .vxml @@ -3807,6 +3838,7 @@ XML: - .xliff - .xmi - .xml.dist + - .xproj - .xsd - .xul - .zcml @@ -3863,6 +3895,7 @@ XSLT: - .xsl tm_scope: text.xml.xsl ace_mode: xml + color: "#EB8CEB" Xojo: type: programming @@ -3896,6 +3929,13 @@ YAML: - .yaml-tmlanguage ace_mode: yaml +YANG: + type: data + extensions: + - .yang + tm_scope: source.yang + ace_mode: text + Yacc: type: programming extensions: @@ -3904,6 +3944,7 @@ Yacc: - .yy tm_scope: source.bison ace_mode: text + color: "#4B6C4B" Zephir: type: programming @@ -3943,7 +3984,6 @@ eC: edn: type: data ace_mode: clojure - color: "#db5855" extensions: - .edn tm_scope: source.clojure @@ -3988,6 +4028,7 @@ reStructuredText: - .rest.txt - .rst.txt ace_mode: text + color: "#B3BCBC" wisp: type: programming diff --git a/lib/linguist/strategy/modeline.rb b/lib/linguist/strategy/modeline.rb index f995d940..eb5a8a5f 100644 --- a/lib/linguist/strategy/modeline.rb +++ b/lib/linguist/strategy/modeline.rb @@ -1,8 +1,19 @@ module Linguist module Strategy class Modeline - EmacsModeline = /-\*-\s*(?:(?!mode)[\w-]+\s*:\s*(?:[\w+-]+)\s*;?\s*)*(?:mode\s*:)?\s*([\w+-]+)\s*(?:;\s*(?!mode)[\w-]+\s*:\s*[\w+-]+\s*)*;?\s*-\*-/i - VimModeline = /vim:\s*set.*\s(?:ft|filetype)=(\w+)\s?.*:/i + EMACS_MODELINE = /-\*-\s*(?:(?!mode)[\w-]+\s*:\s*(?:[\w+-]+)\s*;?\s*)*(?:mode\s*:)?\s*([\w+-]+)\s*(?:;\s*(?!mode)[\w-]+\s*:\s*[\w+-]+\s*)*;?\s*-\*-/i + + # First form vim modeline + # [text]{white}{vi:|vim:|ex:}[white]{options} + # ex: 'vim: syntax=ruby' + VIM_MODELINE_1 = /(?:vim|vi|ex):\s*(?:ft|filetype|syntax)=(\w+)\s?/i + + # Second form vim modeline (compatible with some versions of Vi) + # [text]{white}{vi:|vim:|Vim:|ex:}[white]se[t] {options}:[text] + # ex: 'vim set syntax=ruby:' + VIM_MODELINE_2 = /(?:vim|vi|Vim|ex):\s*se(?:t)?.*\s(?:ft|filetype|syntax)=(\w+)\s?.*:/i + + MODELINES = [EMACS_MODELINE, VIM_MODELINE_1, 
VIM_MODELINE_2] # Public: Detects language based on Vim and Emacs modelines # @@ -22,7 +33,7 @@ module Linguist # # Returns a String or nil def self.modeline(data) - match = data.match(EmacsModeline) || data.match(VimModeline) + match = MODELINES.map { |regex| data.match(regex) }.reject(&:nil?).first match[1] if match end end diff --git a/lib/linguist/version.rb b/lib/linguist/version.rb index b389c1c9..bc1707a9 100644 --- a/lib/linguist/version.rb +++ b/lib/linguist/version.rb @@ -1,3 +1,3 @@ module Linguist - VERSION = "4.7.3" + VERSION = "4.7.5" end diff --git a/samples/Erlang/elixir_parser.yrl b/samples/Erlang/elixir_parser.yrl new file mode 100644 index 00000000..f49f8539 --- /dev/null +++ b/samples/Erlang/elixir_parser.yrl @@ -0,0 +1,856 @@ +Nonterminals + grammar expr_list + expr container_expr block_expr access_expr + no_parens_expr no_parens_zero_expr no_parens_one_expr no_parens_one_ambig_expr + bracket_expr bracket_at_expr bracket_arg matched_expr unmatched_expr max_expr + unmatched_op_expr matched_op_expr no_parens_op_expr no_parens_many_expr + comp_op_eol at_op_eol unary_op_eol and_op_eol or_op_eol capture_op_eol + add_op_eol mult_op_eol two_op_eol three_op_eol pipe_op_eol stab_op_eol + arrow_op_eol match_op_eol when_op_eol in_op_eol in_match_op_eol + type_op_eol rel_op_eol + open_paren close_paren empty_paren eoe + list list_args open_bracket close_bracket + tuple open_curly close_curly + bit_string open_bit close_bit + map map_op map_close map_args map_expr struct_op + assoc_op_eol assoc_expr assoc_base assoc_update assoc_update_kw assoc + container_args_base container_args + call_args_parens_expr call_args_parens_base call_args_parens parens_call + call_args_no_parens_one call_args_no_parens_ambig call_args_no_parens_expr + call_args_no_parens_comma_expr call_args_no_parens_all call_args_no_parens_many + call_args_no_parens_many_strict + stab stab_eoe stab_expr stab_op_eol_and_expr stab_parens_many + kw_eol kw_base kw call_args_no_parens_kw_expr 
call_args_no_parens_kw + dot_op dot_alias dot_alias_container + dot_identifier dot_op_identifier dot_do_identifier + dot_paren_identifier dot_bracket_identifier + do_block fn_eoe do_eoe end_eoe block_eoe block_item block_list + . + +Terminals + identifier kw_identifier kw_identifier_safe kw_identifier_unsafe bracket_identifier + paren_identifier do_identifier block_identifier + fn 'end' aliases + number atom atom_safe atom_unsafe bin_string list_string sigil + dot_call_op op_identifier + comp_op at_op unary_op and_op or_op arrow_op match_op in_op in_match_op + type_op dual_op add_op mult_op two_op three_op pipe_op stab_op when_op assoc_op + capture_op rel_op + 'true' 'false' 'nil' 'do' eol ';' ',' '.' + '(' ')' '[' ']' '{' '}' '<<' '>>' '%{}' '%' + . + +Rootsymbol grammar. + +%% Two shift/reduce conflicts coming from call_args_parens. +Expect 2. + +%% Changes in ops and precedence should be reflected on lib/elixir/lib/macro.ex +%% Note though the operator => in practice has lower precedence than all others, +%% its entry in the table is only to support the %{user | foo => bar} syntax. +Left 5 do. +Right 10 stab_op_eol. %% -> +Left 20 ','. +Nonassoc 30 capture_op_eol. %% & +Left 40 in_match_op_eol. %% <-, \\ (allowed in matches along =) +Right 50 when_op_eol. %% when +Right 60 type_op_eol. %% :: +Right 70 pipe_op_eol. %% | +Right 80 assoc_op_eol. %% => +Right 90 match_op_eol. %% = +Left 130 or_op_eol. %% ||, |||, or +Left 140 and_op_eol. %% &&, &&&, and +Left 150 comp_op_eol. %% ==, !=, =~, ===, !== +Left 160 rel_op_eol. %% <, >, <=, >= +Left 170 arrow_op_eol. %% |>, <<<, >>>, ~>>, <<~, ~>, <~, <~>, <|> +Left 180 in_op_eol. %% in +Left 190 three_op_eol. %% ^^^ +Right 200 two_op_eol. %% ++, --, .., <> +Left 210 add_op_eol. %% +, - +Left 220 mult_op_eol. %% *, / +Nonassoc 300 unary_op_eol. %% +, -, !, ^, not, ~~~ +Left 310 dot_call_op. +Left 310 dot_op. %% . +Nonassoc 320 at_op_eol. %% @ +Nonassoc 330 dot_identifier. 
+ +%%% MAIN FLOW OF EXPRESSIONS + +grammar -> eoe : nil. +grammar -> expr_list : to_block('$1'). +grammar -> eoe expr_list : to_block('$2'). +grammar -> expr_list eoe : to_block('$1'). +grammar -> eoe expr_list eoe : to_block('$2'). +grammar -> '$empty' : nil. + +% Note expressions are on reverse order +expr_list -> expr : ['$1']. +expr_list -> expr_list eoe expr : ['$3'|'$1']. + +expr -> matched_expr : '$1'. +expr -> no_parens_expr : '$1'. +expr -> unmatched_expr : '$1'. + +%% In Elixir we have three main call syntaxes: with parentheses, +%% without parentheses and with do blocks. They are represented +%% in the AST as matched, no_parens and unmatched. +%% +%% Calls without parentheses are further divided according to how +%% problematic they are: +%% +%% (a) no_parens_one: a call with one unproblematic argument +%% (e.g. `f a` or `f g a` and similar) (includes unary operators) +%% +%% (b) no_parens_many: a call with several arguments (e.g. `f a, b`) +%% +%% (c) no_parens_one_ambig: a call with one argument which is +%% itself a no_parens_many or no_parens_one_ambig (e.g. `f g a, b` +%% or `f g h a, b` and similar) +%% +%% Note, in particular, that no_parens_one_ambig expressions are +%% ambiguous and are interpreted such that the outer function has +%% arity 1 (e.g. `f g a, b` is interpreted as `f(g(a, b))` rather +%% than `f(g(a), b)`). Hence the name, no_parens_one_ambig. 
+%% +%% The distinction is required because we can't, for example, have +%% a function call with a do block as argument inside another do +%% block call, unless there are parentheses: +%% +%% if if true do true else false end do #=> invalid +%% if(if true do true else false end) do #=> valid +%% +%% Similarly, it is not possible to nest calls without parentheses +%% if their arity is more than 1: +%% +%% foo a, bar b, c #=> invalid +%% foo(a, bar b, c) #=> invalid +%% foo bar a, b #=> valid +%% foo a, bar(b, c) #=> valid +%% +%% So the different grammar rules need to take into account +%% if calls without parentheses are do blocks in particular +%% segments and act accordingly. +matched_expr -> matched_expr matched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +matched_expr -> unary_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> at_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> capture_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> no_parens_one_expr : '$1'. +matched_expr -> no_parens_zero_expr : '$1'. +matched_expr -> access_expr : '$1'. +matched_expr -> access_expr kw_identifier : throw_invalid_kw_identifier('$2'). + +unmatched_expr -> matched_expr unmatched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr matched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr unmatched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr no_parens_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unary_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> at_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> capture_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> block_expr : '$1'. + +no_parens_expr -> matched_expr no_parens_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). 
+no_parens_expr -> unary_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> at_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> capture_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> no_parens_one_ambig_expr : '$1'. +no_parens_expr -> no_parens_many_expr : '$1'. + +block_expr -> parens_call call_args_parens do_block : build_identifier('$1', '$2' ++ '$3'). +block_expr -> parens_call call_args_parens call_args_parens do_block : build_nested_parens('$1', '$2', '$3' ++ '$4'). +block_expr -> dot_do_identifier do_block : build_identifier('$1', '$2'). +block_expr -> dot_identifier call_args_no_parens_all do_block : build_identifier('$1', '$2' ++ '$3'). + +matched_op_expr -> match_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> add_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> mult_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> two_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> three_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> and_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> or_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> in_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> in_match_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> type_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> when_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> pipe_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> comp_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> rel_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> arrow_op_eol matched_expr : {'$1', '$2'}. +%% Warn for no parens subset +matched_op_expr -> arrow_op_eol no_parens_one_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. + +unmatched_op_expr -> match_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> add_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> mult_op_eol unmatched_expr : {'$1', '$2'}. 
+unmatched_op_expr -> two_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> three_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> and_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> or_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> in_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> in_match_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> type_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> when_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> pipe_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> comp_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> rel_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> arrow_op_eol unmatched_expr : {'$1', '$2'}. + +no_parens_op_expr -> match_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> add_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> mult_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> two_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> three_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> and_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> or_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> in_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> in_match_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> type_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> when_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> pipe_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> comp_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> rel_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> arrow_op_eol no_parens_expr : {'$1', '$2'}. +%% Warn for no parens subset +no_parens_op_expr -> arrow_op_eol no_parens_one_ambig_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. +no_parens_op_expr -> arrow_op_eol no_parens_many_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. 
+ +%% Allow when (and only when) with keywords +no_parens_op_expr -> when_op_eol call_args_no_parens_kw : {'$1', '$2'}. + +no_parens_one_ambig_expr -> dot_op_identifier call_args_no_parens_ambig : build_identifier('$1', '$2'). +no_parens_one_ambig_expr -> dot_identifier call_args_no_parens_ambig : build_identifier('$1', '$2'). + +no_parens_many_expr -> dot_op_identifier call_args_no_parens_many_strict : build_identifier('$1', '$2'). +no_parens_many_expr -> dot_identifier call_args_no_parens_many_strict : build_identifier('$1', '$2'). + +no_parens_one_expr -> dot_op_identifier call_args_no_parens_one : build_identifier('$1', '$2'). +no_parens_one_expr -> dot_identifier call_args_no_parens_one : build_identifier('$1', '$2'). +no_parens_zero_expr -> dot_do_identifier : build_identifier('$1', nil). +no_parens_zero_expr -> dot_identifier : build_identifier('$1', nil). + +%% From this point on, we just have constructs that can be +%% used with the access syntax. Notice that (dot_)identifier +%% is not included in this list simply because the tokenizer +%% marks identifiers followed by brackets as bracket_identifier. +access_expr -> bracket_at_expr : '$1'. +access_expr -> bracket_expr : '$1'. +access_expr -> at_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> unary_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> capture_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> fn_eoe stab end_eoe : build_fn('$1', reverse('$2')). +access_expr -> open_paren stab close_paren : build_stab(reverse('$2')). +access_expr -> open_paren stab ';' close_paren : build_stab(reverse('$2')). +access_expr -> open_paren ';' stab ';' close_paren : build_stab(reverse('$3')). +access_expr -> open_paren ';' stab close_paren : build_stab(reverse('$3')). +access_expr -> open_paren ';' close_paren : build_stab([]). +access_expr -> empty_paren : nil. +access_expr -> number : ?exprs('$1'). +access_expr -> list : element(1, '$1'). 
+access_expr -> map : '$1'. +access_expr -> tuple : '$1'. +access_expr -> 'true' : ?id('$1'). +access_expr -> 'false' : ?id('$1'). +access_expr -> 'nil' : ?id('$1'). +access_expr -> bin_string : build_bin_string('$1'). +access_expr -> list_string : build_list_string('$1'). +access_expr -> bit_string : '$1'. +access_expr -> sigil : build_sigil('$1'). +access_expr -> max_expr : '$1'. + +%% Aliases and properly formed calls. Used by map_expr. +max_expr -> atom : ?exprs('$1'). +max_expr -> atom_safe : build_quoted_atom('$1', true). +max_expr -> atom_unsafe : build_quoted_atom('$1', false). +max_expr -> parens_call call_args_parens : build_identifier('$1', '$2'). +max_expr -> parens_call call_args_parens call_args_parens : build_nested_parens('$1', '$2', '$3'). +max_expr -> dot_alias : '$1'. + +bracket_arg -> open_bracket kw close_bracket : build_list('$1', '$2'). +bracket_arg -> open_bracket container_expr close_bracket : build_list('$1', '$2'). +bracket_arg -> open_bracket container_expr ',' close_bracket : build_list('$1', '$2'). + +bracket_expr -> dot_bracket_identifier bracket_arg : build_access(build_identifier('$1', nil), '$2'). +bracket_expr -> access_expr bracket_arg : build_access('$1', '$2'). + +bracket_at_expr -> at_op_eol dot_bracket_identifier bracket_arg : + build_access(build_unary_op('$1', build_identifier('$2', nil)), '$3'). +bracket_at_expr -> at_op_eol access_expr bracket_arg : + build_access(build_unary_op('$1', '$2'), '$3'). + +%% Blocks + +do_block -> do_eoe 'end' : [[{do, nil}]]. +do_block -> do_eoe stab end_eoe : [[{do, build_stab(reverse('$2'))}]]. +do_block -> do_eoe block_list 'end' : [[{do, nil}|'$2']]. +do_block -> do_eoe stab_eoe block_list 'end' : [[{do, build_stab(reverse('$2'))}|'$3']]. + +eoe -> eol : '$1'. +eoe -> ';' : '$1'. +eoe -> eol ';' : '$1'. + +fn_eoe -> 'fn' : '$1'. +fn_eoe -> 'fn' eoe : '$1'. + +do_eoe -> 'do' : '$1'. +do_eoe -> 'do' eoe : '$1'. + +end_eoe -> 'end' : '$1'. +end_eoe -> eoe 'end' : '$2'. 
+ +block_eoe -> block_identifier : '$1'. +block_eoe -> block_identifier eoe : '$1'. + +stab -> stab_expr : ['$1']. +stab -> stab eoe stab_expr : ['$3'|'$1']. + +stab_eoe -> stab : '$1'. +stab_eoe -> stab eoe : '$1'. + +%% Here, `element(1, Token)` is the stab operator, +%% while `element(2, Token)` is the expression. +stab_expr -> expr : + '$1'. +stab_expr -> stab_op_eol_and_expr : + build_op(element(1, '$1'), [], element(2, '$1')). +stab_expr -> empty_paren stab_op_eol_and_expr : + build_op(element(1, '$2'), [], element(2, '$2')). +stab_expr -> call_args_no_parens_all stab_op_eol_and_expr : + build_op(element(1, '$2'), unwrap_when(unwrap_splice('$1')), element(2, '$2')). +stab_expr -> stab_parens_many stab_op_eol_and_expr : + build_op(element(1, '$2'), unwrap_splice('$1'), element(2, '$2')). +stab_expr -> stab_parens_many when_op expr stab_op_eol_and_expr : + build_op(element(1, '$4'), [{'when', meta_from_token('$2'), unwrap_splice('$1') ++ ['$3']}], element(2, '$4')). + +stab_op_eol_and_expr -> stab_op_eol expr : {'$1', '$2'}. +stab_op_eol_and_expr -> stab_op_eol : warn_empty_stab_clause('$1'), {'$1', nil}. + +block_item -> block_eoe stab_eoe : {?exprs('$1'), build_stab(reverse('$2'))}. +block_item -> block_eoe : {?exprs('$1'), nil}. + +block_list -> block_item : ['$1']. +block_list -> block_item block_list : ['$1'|'$2']. + +%% Helpers + +open_paren -> '(' : '$1'. +open_paren -> '(' eol : '$1'. +close_paren -> ')' : '$1'. +close_paren -> eol ')' : '$2'. + +empty_paren -> open_paren ')' : '$1'. + +open_bracket -> '[' : '$1'. +open_bracket -> '[' eol : '$1'. +close_bracket -> ']' : '$1'. +close_bracket -> eol ']' : '$2'. + +open_bit -> '<<' : '$1'. +open_bit -> '<<' eol : '$1'. +close_bit -> '>>' : '$1'. +close_bit -> eol '>>' : '$2'. + +open_curly -> '{' : '$1'. +open_curly -> '{' eol : '$1'. +close_curly -> '}' : '$1'. +close_curly -> eol '}' : '$2'. + +% Operators + +add_op_eol -> add_op : '$1'. +add_op_eol -> add_op eol : '$1'. +add_op_eol -> dual_op : '$1'. 
+add_op_eol -> dual_op eol : '$1'. + +mult_op_eol -> mult_op : '$1'. +mult_op_eol -> mult_op eol : '$1'. + +two_op_eol -> two_op : '$1'. +two_op_eol -> two_op eol : '$1'. + +three_op_eol -> three_op : '$1'. +three_op_eol -> three_op eol : '$1'. + +pipe_op_eol -> pipe_op : '$1'. +pipe_op_eol -> pipe_op eol : '$1'. + +capture_op_eol -> capture_op : '$1'. +capture_op_eol -> capture_op eol : '$1'. + +unary_op_eol -> unary_op : '$1'. +unary_op_eol -> unary_op eol : '$1'. +unary_op_eol -> dual_op : '$1'. +unary_op_eol -> dual_op eol : '$1'. + +match_op_eol -> match_op : '$1'. +match_op_eol -> match_op eol : '$1'. + +and_op_eol -> and_op : '$1'. +and_op_eol -> and_op eol : '$1'. + +or_op_eol -> or_op : '$1'. +or_op_eol -> or_op eol : '$1'. + +in_op_eol -> in_op : '$1'. +in_op_eol -> in_op eol : '$1'. + +in_match_op_eol -> in_match_op : '$1'. +in_match_op_eol -> in_match_op eol : '$1'. + +type_op_eol -> type_op : '$1'. +type_op_eol -> type_op eol : '$1'. + +when_op_eol -> when_op : '$1'. +when_op_eol -> when_op eol : '$1'. + +stab_op_eol -> stab_op : '$1'. +stab_op_eol -> stab_op eol : '$1'. + +at_op_eol -> at_op : '$1'. +at_op_eol -> at_op eol : '$1'. + +comp_op_eol -> comp_op : '$1'. +comp_op_eol -> comp_op eol : '$1'. + +rel_op_eol -> rel_op : '$1'. +rel_op_eol -> rel_op eol : '$1'. + +arrow_op_eol -> arrow_op : '$1'. +arrow_op_eol -> arrow_op eol : '$1'. + +% Dot operator + +dot_op -> '.' : '$1'. +dot_op -> '.' eol : '$1'. + +dot_identifier -> identifier : '$1'. +dot_identifier -> matched_expr dot_op identifier : build_dot('$2', '$1', '$3'). + +dot_alias -> aliases : {'__aliases__', meta_from_token('$1', 0), ?exprs('$1')}. +dot_alias -> matched_expr dot_op aliases : build_dot_alias('$2', '$1', '$3'). +dot_alias -> matched_expr dot_op dot_alias_container : build_dot_container('$2', '$1', '$3'). + +dot_alias_container -> open_curly '}' : []. +dot_alias_container -> open_curly container_args close_curly : '$2'. + +dot_op_identifier -> op_identifier : '$1'. 
+dot_op_identifier -> matched_expr dot_op op_identifier : build_dot('$2', '$1', '$3'). + +dot_do_identifier -> do_identifier : '$1'. +dot_do_identifier -> matched_expr dot_op do_identifier : build_dot('$2', '$1', '$3'). + +dot_bracket_identifier -> bracket_identifier : '$1'. +dot_bracket_identifier -> matched_expr dot_op bracket_identifier : build_dot('$2', '$1', '$3'). + +dot_paren_identifier -> paren_identifier : '$1'. +dot_paren_identifier -> matched_expr dot_op paren_identifier : build_dot('$2', '$1', '$3'). + +parens_call -> dot_paren_identifier : '$1'. +parens_call -> matched_expr dot_call_op : {'.', meta_from_token('$2'), ['$1']}. % Fun/local calls + +% Function calls with no parentheses + +call_args_no_parens_expr -> matched_expr : '$1'. +call_args_no_parens_expr -> no_parens_expr : throw_no_parens_many_strict('$1'). + +call_args_no_parens_comma_expr -> matched_expr ',' call_args_no_parens_expr : ['$3', '$1']. +call_args_no_parens_comma_expr -> call_args_no_parens_comma_expr ',' call_args_no_parens_expr : ['$3'|'$1']. + +call_args_no_parens_all -> call_args_no_parens_one : '$1'. +call_args_no_parens_all -> call_args_no_parens_ambig : '$1'. +call_args_no_parens_all -> call_args_no_parens_many : '$1'. + +call_args_no_parens_one -> call_args_no_parens_kw : ['$1']. +call_args_no_parens_one -> matched_expr : ['$1']. + +call_args_no_parens_ambig -> no_parens_expr : ['$1']. + +call_args_no_parens_many -> matched_expr ',' call_args_no_parens_kw : ['$1', '$3']. +call_args_no_parens_many -> call_args_no_parens_comma_expr : reverse('$1'). +call_args_no_parens_many -> call_args_no_parens_comma_expr ',' call_args_no_parens_kw : reverse(['$3'|'$1']). + +call_args_no_parens_many_strict -> call_args_no_parens_many : '$1'. +call_args_no_parens_many_strict -> open_paren call_args_no_parens_kw close_paren : throw_no_parens_strict('$1'). +call_args_no_parens_many_strict -> open_paren call_args_no_parens_many close_paren : throw_no_parens_strict('$1'). 
+ +stab_parens_many -> open_paren call_args_no_parens_kw close_paren : ['$2']. +stab_parens_many -> open_paren call_args_no_parens_many close_paren : '$2'. + +% Containers + +container_expr -> matched_expr : '$1'. +container_expr -> unmatched_expr : '$1'. +container_expr -> no_parens_expr : throw_no_parens_container_strict('$1'). + +container_args_base -> container_expr : ['$1']. +container_args_base -> container_args_base ',' container_expr : ['$3'|'$1']. + +container_args -> container_args_base : lists:reverse('$1'). +container_args -> container_args_base ',' : lists:reverse('$1'). +container_args -> container_args_base ',' kw : lists:reverse(['$3'|'$1']). + +% Function calls with parentheses + +call_args_parens_expr -> matched_expr : '$1'. +call_args_parens_expr -> unmatched_expr : '$1'. +call_args_parens_expr -> no_parens_expr : throw_no_parens_many_strict('$1'). + +call_args_parens_base -> call_args_parens_expr : ['$1']. +call_args_parens_base -> call_args_parens_base ',' call_args_parens_expr : ['$3'|'$1']. + +call_args_parens -> empty_paren : []. +call_args_parens -> open_paren no_parens_expr close_paren : ['$2']. +call_args_parens -> open_paren kw close_paren : ['$2']. +call_args_parens -> open_paren call_args_parens_base close_paren : reverse('$2'). +call_args_parens -> open_paren call_args_parens_base ',' kw close_paren : reverse(['$4'|'$2']). + +% KV + +kw_eol -> kw_identifier : ?exprs('$1'). +kw_eol -> kw_identifier eol : ?exprs('$1'). +kw_eol -> kw_identifier_safe : build_quoted_atom('$1', true). +kw_eol -> kw_identifier_safe eol : build_quoted_atom('$1', true). +kw_eol -> kw_identifier_unsafe : build_quoted_atom('$1', false). +kw_eol -> kw_identifier_unsafe eol : build_quoted_atom('$1', false). + +kw_base -> kw_eol container_expr : [{'$1', '$2'}]. +kw_base -> kw_base ',' kw_eol container_expr : [{'$3', '$4'}|'$1']. + +kw -> kw_base : reverse('$1'). +kw -> kw_base ',' : reverse('$1'). 
+ +call_args_no_parens_kw_expr -> kw_eol matched_expr : {'$1', '$2'}. +call_args_no_parens_kw_expr -> kw_eol no_parens_expr : {'$1', '$2'}. + +call_args_no_parens_kw -> call_args_no_parens_kw_expr : ['$1']. +call_args_no_parens_kw -> call_args_no_parens_kw_expr ',' call_args_no_parens_kw : ['$1'|'$3']. + +% Lists + +list_args -> kw : '$1'. +list_args -> container_args_base : reverse('$1'). +list_args -> container_args_base ',' : reverse('$1'). +list_args -> container_args_base ',' kw : reverse('$1', '$3'). + +list -> open_bracket ']' : build_list('$1', []). +list -> open_bracket list_args close_bracket : build_list('$1', '$2'). + +% Tuple + +tuple -> open_curly '}' : build_tuple('$1', []). +tuple -> open_curly container_args close_curly : build_tuple('$1', '$2'). + +% Bitstrings + +bit_string -> open_bit '>>' : build_bit('$1', []). +bit_string -> open_bit container_args close_bit : build_bit('$1', '$2'). + +% Map and structs + +%% Allow unquote/@something/aliases inside maps and structs. +map_expr -> max_expr : '$1'. +map_expr -> dot_identifier : build_identifier('$1', nil). +map_expr -> at_op_eol map_expr : build_unary_op('$1', '$2'). + +assoc_op_eol -> assoc_op : '$1'. +assoc_op_eol -> assoc_op eol : '$1'. + +assoc_expr -> matched_expr assoc_op_eol matched_expr : {'$1', '$3'}. +assoc_expr -> unmatched_expr assoc_op_eol unmatched_expr : {'$1', '$3'}. +assoc_expr -> matched_expr assoc_op_eol unmatched_expr : {'$1', '$3'}. +assoc_expr -> unmatched_expr assoc_op_eol matched_expr : {'$1', '$3'}. +assoc_expr -> map_expr : '$1'. + +assoc_update -> matched_expr pipe_op_eol assoc_expr : {'$2', '$1', ['$3']}. +assoc_update -> unmatched_expr pipe_op_eol assoc_expr : {'$2', '$1', ['$3']}. + +assoc_update_kw -> matched_expr pipe_op_eol kw : {'$2', '$1', '$3'}. +assoc_update_kw -> unmatched_expr pipe_op_eol kw : {'$2', '$1', '$3'}. + +assoc_base -> assoc_expr : ['$1']. +assoc_base -> assoc_base ',' assoc_expr : ['$3'|'$1']. + +assoc -> assoc_base : reverse('$1'). 
+assoc -> assoc_base ',' : reverse('$1'). + +map_op -> '%{}' : '$1'. +map_op -> '%{}' eol : '$1'. + +map_close -> kw close_curly : '$1'. +map_close -> assoc close_curly : '$1'. +map_close -> assoc_base ',' kw close_curly : reverse('$1', '$3'). + +map_args -> open_curly '}' : build_map('$1', []). +map_args -> open_curly map_close : build_map('$1', '$2'). +map_args -> open_curly assoc_update close_curly : build_map_update('$1', '$2', []). +map_args -> open_curly assoc_update ',' close_curly : build_map_update('$1', '$2', []). +map_args -> open_curly assoc_update ',' map_close : build_map_update('$1', '$2', '$4'). +map_args -> open_curly assoc_update_kw close_curly : build_map_update('$1', '$2', []). + +struct_op -> '%' : '$1'. + +map -> map_op map_args : '$2'. +map -> struct_op map_expr map_args : {'%', meta_from_token('$1'), ['$2', '$3']}. +map -> struct_op map_expr eol map_args : {'%', meta_from_token('$1'), ['$2', '$4']}. + +Erlang code. + +-define(file(), get(elixir_parser_file)). +-define(id(Token), element(1, Token)). +-define(location(Token), element(2, Token)). +-define(exprs(Token), element(3, Token)). +-define(meta(Node), element(2, Node)). +-define(rearrange_uop(Op), (Op == 'not' orelse Op == '!')). + +%% The following directive is needed for (significantly) faster +%% compilation of the generated .erl file by the HiPE compiler +-compile([{hipe, [{regalloc, linear_scan}]}]). +-import(lists, [reverse/1, reverse/2]). + +meta_from_token(Token, Counter) -> [{counter, Counter}|meta_from_token(Token)]. +meta_from_token(Token) -> meta_from_location(?location(Token)). + +meta_from_location({Line, Column, EndColumn}) + when is_integer(Line), is_integer(Column), is_integer(EndColumn) -> [{line, Line}]. 
+ +%% Operators + +build_op({_Kind, Location, 'in'}, {UOp, _, [Left]}, Right) when ?rearrange_uop(UOp) -> + {UOp, meta_from_location(Location), [{'in', meta_from_location(Location), [Left, Right]}]}; + +build_op({_Kind, Location, Op}, Left, Right) -> + {Op, meta_from_location(Location), [Left, Right]}. + +build_unary_op({_Kind, Location, Op}, Expr) -> + {Op, meta_from_location(Location), [Expr]}. + +build_list(Marker, Args) -> + {Args, ?location(Marker)}. + +build_tuple(_Marker, [Left, Right]) -> + {Left, Right}; +build_tuple(Marker, Args) -> + {'{}', meta_from_token(Marker), Args}. + +build_bit(Marker, Args) -> + {'<<>>', meta_from_token(Marker), Args}. + +build_map(Marker, Args) -> + {'%{}', meta_from_token(Marker), Args}. + +build_map_update(Marker, {Pipe, Left, Right}, Extra) -> + {'%{}', meta_from_token(Marker), [build_op(Pipe, Left, Right ++ Extra)]}. + +%% Blocks + +build_block([{Op, _, [_]}]=Exprs) when ?rearrange_uop(Op) -> {'__block__', [], Exprs}; +build_block([{unquote_splicing, _, Args}]=Exprs) when + length(Args) =< 2 -> {'__block__', [], Exprs}; +build_block([Expr]) -> Expr; +build_block(Exprs) -> {'__block__', [], Exprs}. + +%% Dots + +build_dot_alias(Dot, {'__aliases__', _, Left}, {'aliases', _, Right}) -> + {'__aliases__', meta_from_token(Dot), Left ++ Right}; + +build_dot_alias(_Dot, Atom, {'aliases', _, _} = Token) when is_atom(Atom) -> + throw_bad_atom(Token); + +build_dot_alias(Dot, Other, {'aliases', _, Right}) -> + {'__aliases__', meta_from_token(Dot), [Other|Right]}. + +build_dot_container(Dot, Left, Right) -> + Meta = meta_from_token(Dot), + {{'.', Meta, [Left, '{}']}, Meta, Right}. + +build_dot(Dot, Left, Right) -> + {'.', meta_from_token(Dot), [Left, extract_identifier(Right)]}. + +extract_identifier({Kind, _, Identifier}) when + Kind == identifier; Kind == bracket_identifier; Kind == paren_identifier; + Kind == do_identifier; Kind == op_identifier -> + Identifier. 
+ +%% Identifiers + +build_nested_parens(Dot, Args1, Args2) -> + Identifier = build_identifier(Dot, Args1), + Meta = ?meta(Identifier), + {Identifier, Meta, Args2}. + +build_identifier({'.', Meta, _} = Dot, Args) -> + FArgs = case Args of + nil -> []; + _ -> Args + end, + {Dot, Meta, FArgs}; + +build_identifier({op_identifier, Location, Identifier}, [Arg]) -> + {Identifier, [{ambiguous_op, nil}|meta_from_location(Location)], [Arg]}; + +build_identifier({_, Location, Identifier}, Args) -> + {Identifier, meta_from_location(Location), Args}. + +%% Fn + +build_fn(Op, [{'->', _, [_, _]}|_] = Stab) -> + {fn, meta_from_token(Op), build_stab(Stab)}; +build_fn(Op, _Stab) -> + throw(meta_from_token(Op), "expected clauses to be defined with -> inside: ", "'fn'"). + +%% Access + +build_access(Expr, {List, Location}) -> + Meta = meta_from_location(Location), + {{'.', Meta, ['Elixir.Access', get]}, Meta, [Expr, List]}. + +%% Interpolation aware + +build_sigil({sigil, Location, Sigil, Parts, Modifiers}) -> + Meta = meta_from_location(Location), + {list_to_atom("sigil_" ++ [Sigil]), Meta, [{'<<>>', Meta, string_parts(Parts)}, Modifiers]}. + +build_bin_string({bin_string, _Location, [H]}) when is_binary(H) -> + H; +build_bin_string({bin_string, Location, Args}) -> + {'<<>>', meta_from_location(Location), string_parts(Args)}. + +build_list_string({list_string, _Location, [H]}) when is_binary(H) -> + elixir_utils:characters_to_list(H); +build_list_string({list_string, Location, Args}) -> + Meta = meta_from_location(Location), + {{'.', Meta, ['Elixir.String', to_char_list]}, Meta, [{'<<>>', Meta, string_parts(Args)}]}. + +build_quoted_atom({_, _Location, [H]}, Safe) when is_binary(H) -> + Op = binary_to_atom_op(Safe), erlang:Op(H, utf8); +build_quoted_atom({_, Location, Args}, Safe) -> + Meta = meta_from_location(Location), + {{'.', Meta, [erlang, binary_to_atom_op(Safe)]}, Meta, [{'<<>>', Meta, string_parts(Args)}, utf8]}. 
+ +binary_to_atom_op(true) -> binary_to_existing_atom; +binary_to_atom_op(false) -> binary_to_atom. + +string_parts(Parts) -> + [string_part(Part) || Part <- Parts]. +string_part(Binary) when is_binary(Binary) -> + Binary; +string_part({Location, Tokens}) -> + Form = string_tokens_parse(Tokens), + Meta = meta_from_location(Location), + {'::', Meta, [{{'.', Meta, ['Elixir.Kernel', to_string]}, Meta, [Form]}, {binary, Meta, nil}]}. + +string_tokens_parse(Tokens) -> + case parse(Tokens) of + {ok, Forms} -> Forms; + {error, _} = Error -> throw(Error) + end. + +%% Keywords + +build_stab([{'->', Meta, [Left, Right]}|T]) -> + build_stab(Meta, T, Left, [Right], []); + +build_stab(Else) -> + build_block(Else). + +build_stab(Old, [{'->', New, [Left, Right]}|T], Marker, Temp, Acc) -> + H = {'->', Old, [Marker, build_block(reverse(Temp))]}, + build_stab(New, T, Left, [Right], [H|Acc]); + +build_stab(Meta, [H|T], Marker, Temp, Acc) -> + build_stab(Meta, T, Marker, [H|Temp], Acc); + +build_stab(Meta, [], Marker, Temp, Acc) -> + H = {'->', Meta, [Marker, build_block(reverse(Temp))]}, + reverse([H|Acc]). + +%% Every time the parser sees a (unquote_splicing()) +%% it assumes that a block is being spliced, wrapping +%% the splicing in a __block__. But in the stab clause, +%% we can have (unquote_splicing(1, 2, 3)) -> :ok, in such +%% case, we don't actually want the block, since it is +%% an arg style call. unwrap_splice unwraps the splice +%% from such blocks. +unwrap_splice([{'__block__', [], [{unquote_splicing, _, _}] = Splice}]) -> + Splice; + +unwrap_splice(Other) -> Other. + +unwrap_when(Args) -> + case elixir_utils:split_last(Args) of + {Start, {'when', Meta, [_, _] = End}} -> + [{'when', Meta, Start ++ End}]; + {_, _} -> + Args + end. + +to_block([One]) -> One; +to_block(Other) -> {'__block__', [], reverse(Other)}. 
+ +%% Warnings and errors + +throw(Meta, Error, Token) -> + Line = + case lists:keyfind(line, 1, Meta) of + {line, L} -> L; + false -> 0 + end, + throw({error, {Line, ?MODULE, [Error, Token]}}). + +throw_bad_atom(Token) -> + throw(meta_from_token(Token), "atom cannot be followed by an alias. If the '.' was meant to be " + "part of the atom's name, the atom name must be quoted. Syntax error before: ", "'.'"). + +throw_no_parens_strict(Token) -> + throw(meta_from_token(Token), "unexpected parentheses. If you are making a " + "function call, do not insert spaces between the function name and the " + "opening parentheses. Syntax error before: ", "'('"). + +throw_no_parens_many_strict(Node) -> + throw(?meta(Node), + "unexpected comma. Parentheses are required to solve ambiguity in nested calls.\n\n" + "This error happens when you have nested function calls without parentheses. " + "For example:\n\n" + " one a, two b, c, d\n\n" + "In the example above, we don't know if the parameters \"c\" and \"d\" apply " + "to the function \"one\" or \"two\". You can solve this by explicitly adding " + "parentheses:\n\n" + " one a, two(b, c, d)\n\n" + "Elixir cannot compile otherwise. Syntax error before: ", "','"). + +throw_no_parens_container_strict(Node) -> + throw(?meta(Node), + "unexpected comma. Parentheses are required to solve ambiguity inside containers.\n\n" + "This error may happen when you forget a comma in a list or other container:\n\n" + " [a, b c, d]\n\n" + "Or when you have ambiguous calls:\n\n" + " [one, two three, four, five]\n\n" + "In the example above, we don't know if the parameters \"four\" and \"five\" " + "belongs to the list or the function \"two\". You can solve this by explicitly " + "adding parentheses:\n\n" + " [one, two(three, four), five]\n\n" + "Elixir cannot compile otherwise. Syntax error before: ", "','"). 
+ +throw_invalid_kw_identifier({_, _, do} = Token) -> + throw(meta_from_token(Token), elixir_tokenizer:invalid_do_error("unexpected keyword \"do:\""), "'do:'"); +throw_invalid_kw_identifier({_, _, KW} = Token) -> + throw(meta_from_token(Token), "syntax error before: ", "'" ++ atom_to_list(KW) ++ "':"). + +%% TODO: Make those warnings errors. +warn_empty_stab_clause({stab_op, {Line, _Begin, _End}, '->'}) -> + elixir_errors:warn(Line, ?file(), + "an expression is always required on the right side of ->. " + "Please provide a value after ->"). + +warn_pipe({arrow_op, {Line, _Begin, _End}, Op}, {_, [_|_], [_|_]}) -> + elixir_errors:warn(Line, ?file(), + io_lib:format( + "you are piping into a function call without parentheses, which may be ambiguous. " + "Please wrap the function you are piping into in parentheses. For example:\n\n" + " foo 1 ~ts bar 2 ~ts baz 3\n\n" + "Should be written as:\n\n" + " foo(1) ~ts bar(2) ~ts baz(3)\n", + [Op, Op, Op, Op] + ) + ); +warn_pipe(_Token, _) -> + ok. diff --git a/samples/Erlang/lfe_scan.xrl b/samples/Erlang/lfe_scan.xrl new file mode 100644 index 00000000..72bb1b22 --- /dev/null +++ b/samples/Erlang/lfe_scan.xrl @@ -0,0 +1,256 @@ +%% Copyright (c) 2008-2013 Robert Virding +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. + +%% File : lfe_scan.xrl +%% Author : Robert Virding +%% Purpose : Token definitions for Lisp Flavoured Erlang. + +Definitions. 
+B = [01] +O = [0-7] +D = [0-9] +H = [0-9a-fA-F] +B36 = [0-9a-zA-Z] +U = [A-Z] +L = [a-z] +A = ({U}|{L}) +DEL = [][()}{";\000-\s] +SYM = [^][()}{";\000-\s\177-\237] +SSYM = [^][()}{"|;#`',\000-\s\177-\237] +WS = ([\000-\s]|;[^\n]*) + +Rules. +%% Bracketed Comments using #| foo |# +#{D}*\|[^\|]*\|+([^#\|][^\|]*\|+)*# : + block_comment(string:substr(TokenChars, 3)). + +%% Separators +' : {token,{'\'',TokenLine}}. +` : {token,{'`',TokenLine}}. +, : {token,{',',TokenLine}}. +,@ : {token,{',@',TokenLine}}. +\. : {token,{'.',TokenLine}}. +[][()}{] : {token,{list_to_atom(TokenChars),TokenLine}}. + +#{D}*[bB]\( : {token,{'#B(',TokenLine}}. +#{D}*[mM]\( : {token,{'#M(',TokenLine}}. +#{D}*\( : {token,{'#(',TokenLine}}. +#{D}*\. : {token,{'#.',TokenLine}}. + +#{D}*` : {token,{'#`',TokenLine}}. +#{D}*; : {token,{'#;',TokenLine}}. +#{D}*, : {token,{'#,',TokenLine}}. +#{D}*,@ : {token,{'#,@',TokenLine}}. + +%% Characters +#{D}*\\(x{H}+|.) : char_token(skip_past(TokenChars, $\\, $\\), TokenLine). + +%% Based numbers +#{D}*\*{SYM}+ : base_token(skip_past(TokenChars, $*, $*), 2, TokenLine). +#{D}*[bB]{SYM}+ : base_token(skip_past(TokenChars, $b, $B), 2, TokenLine). +#{D}*[oO]{SYM}+ : base_token(skip_past(TokenChars, $o, $O), 8, TokenLine). +#{D}*[dD]{SYM}+ : base_token(skip_past(TokenChars, $d, $D), 10, TokenLine). +#{D}*[xX]{SYM}+ : base_token(skip_past(TokenChars, $x, $X), 16, TokenLine). +#{D}*[rR]{SYM}+ : + %% Scan over digit chars to get base. + {Base,[_|Ds]} = base1(tl(TokenChars), 10, 0), + base_token(Ds, Base, TokenLine). + +%% String +"(\\x{H}+;|\\.|[^"\\])*" : + %% Strip quotes. + S = string:substr(TokenChars, 2, TokenLen - 2), + {token,{string,TokenLine,chars(S)}}. +%% Binary string +#"(\\x{H}+;|\\.|[^"\\])*" : + %% Strip quotes. + S = string:substr(TokenChars, 3, TokenLen - 3), + Bin = unicode:characters_to_binary(chars(S), utf8, utf8), + {token,{binary,TokenLine,Bin}}. +%% Symbols +\|(\\x{H}+;|\\.|[^|\\])*\| : + %% Strip quotes. 
+ S = string:substr(TokenChars, 2, TokenLen - 2), + symbol_token(chars(S), TokenLine). +%% Funs +#'{SSYM}{SYM}*/{D}+ : + %% Strip sharpsign single-quote. + FunStr = string:substr(TokenChars,3), + {token,{'#\'',TokenLine,FunStr}}. +%% Atoms +[+-]?{D}+ : + case catch {ok,list_to_integer(TokenChars)} of + {ok,I} -> {token,{number,TokenLine,I}}; + _ -> {error,"illegal integer"} + end. +[+-]?{D}+\.{D}+([eE][+-]?{D}+)? : + case catch {ok,list_to_float(TokenChars)} of + {ok,F} -> {token,{number,TokenLine,F}}; + _ -> {error,"illegal float"} + end. +{SSYM}{SYM}* : + symbol_token(TokenChars, TokenLine). +{WS}+ : skip_token. + +Erlang code. +%% Copyright (c) 2008-2013 Robert Virding +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. + +%% File : lfe_scan.erl +%% Author : Robert Virding +%% Purpose : Token definitions for Lisp Flavoured Erlang. + +-export([start_symbol_char/1,symbol_char/1]). + +-import(string, [substr/2,substr/3]). + +%% start_symbol_char(Char) -> true | false. +%% symbol_char(Char) -> true | false. +%% Define start symbol chars and symbol chars. + +start_symbol_char($#) -> false; +start_symbol_char($`) -> false; +start_symbol_char($') -> false; %' +start_symbol_char($,) -> false; +start_symbol_char($|) -> false; %Symbol quote character +start_symbol_char(C) -> symbol_char(C). 
+
+symbol_char($() -> false;
+symbol_char($)) -> false;
+symbol_char($[) -> false;
+symbol_char($]) -> false;
+symbol_char(${) -> false;
+symbol_char($}) -> false;
+symbol_char($") -> false;
+symbol_char($;) -> false;
+symbol_char(C) -> ((C > $\s) and (C =< $~)) orelse (C > $\240).
+
+%% symbol_token(Chars, Line) -> {token,{symbol,Line,Symbol}} | {error,E}.
+%% Build a symbol from list of legal characters, else error.
+
+symbol_token(Cs, L) ->
+    case catch {ok,list_to_atom(Cs)} of
+        {ok,S} -> {token,{symbol,L,S}};
+        _ -> {error,"illegal symbol"}
+    end.
+
+%% base_token(Chars, Base, Line) -> Integer.
+%% Convert a string of Base characters into a number. We only allow
+%% base between 2 and 36, and an optional sign character first.
+
+base_token(_, B, _) when B < 2; B > 36 ->
+    {error,"illegal number base"};
+base_token([$+|Cs], B, L) -> base_token(Cs, B, +1, L);
+base_token([$-|Cs], B, L) -> base_token(Cs, B, -1, L);
+base_token(Cs, B, L) -> base_token(Cs, B, +1, L).
+
+base_token(Cs, B, S, L) ->
+    case base1(Cs, B, 0) of
+        {N,[]} -> {token,{number,L,S*N}};
+        {_,_} -> {error,"illegal based number"}
+    end.
+
+base1([C|Cs], Base, SoFar) when C >= $0, C =< $9, C < Base + $0 ->
+    Next = SoFar * Base + (C - $0),
+    base1(Cs, Base, Next);
+base1([C|Cs], Base, SoFar) when C >= $a, C =< $z, C < Base + $a - 10 ->
+    Next = SoFar * Base + (C - $a + 10),
+    base1(Cs, Base, Next);
+base1([C|Cs], Base, SoFar) when C >= $A, C =< $Z, C < Base + $A - 10 ->
+    Next = SoFar * Base + (C - $A + 10),
+    base1(Cs, Base, Next);
+base1([C|Cs], _Base, SoFar) -> {SoFar,[C|Cs]};
+base1([], _Base, N) -> {N,[]}.
+
+-define(IS_UNICODE(C), ((C >= 0) and (C =< 16#10FFFF))).
+
+%% char_token(InputChars, Line) -> {token,{number,L,N}} | {error,E}.
+%% Convert an input string into the corresponding character. For a
+%% sequence of hex characters we check the resultant code is in the
+%% unicode range.
+ +char_token([$x,C|Cs], L) -> + case base1([C|Cs], 16, 0) of + {N,[]} when ?IS_UNICODE(N) -> {token,{number,L,N}}; + _ -> {error,"illegal character"} + end; +char_token([C], L) -> {token,{number,L,C}}. + +%% chars(InputChars) -> Chars. +%% Convert an input string into the corresponding string characters. +%% We know that the input string is correct. + +chars([$\\,$x,C|Cs0]) -> + case hex_char(C) of + true -> + case base1([C|Cs0], 16, 0) of + {N,[$;|Cs1]} -> [N|chars(Cs1)]; + _Other -> [escape_char($x)|chars([C|Cs0])] + end; + false -> [escape_char($x)|chars([C|Cs0])] + end; +chars([$\\,C|Cs]) -> [escape_char(C)|chars(Cs)]; +chars([C|Cs]) -> [C|chars(Cs)]; +chars([]) -> []. + +hex_char(C) when C >= $0, C =< $9 -> true; +hex_char(C) when C >= $a, C =< $f -> true; +hex_char(C) when C >= $A, C =< $F -> true; +hex_char(_) -> false. + +escape_char($b) -> $\b; %\b = BS +escape_char($t) -> $\t; %\t = TAB +escape_char($n) -> $\n; %\n = LF +escape_char($v) -> $\v; %\v = VT +escape_char($f) -> $\f; %\f = FF +escape_char($r) -> $\r; %\r = CR +escape_char($e) -> $\e; %\e = ESC +escape_char($s) -> $\s; %\s = SPC +escape_char($d) -> $\d; %\d = DEL +escape_char(C) -> C. + +%% Block Comment: +%% Provide a sensible error when people attempt to include nested +%% comments because currently the parser cannot process them without +%% a rebuild. But simply exploding on a '#|' is not going to be that +%% helpful. + +block_comment(TokenChars) -> + %% Check we're not opening another comment block. + case string:str(TokenChars, "#|") of + 0 -> skip_token; %% No nesting found + _ -> {error, "illegal nested block comment"} + end. + +%% skip_until(String, Char1, Char2) -> String. +%% skip_past(String, Char1, Char2) -> String. + +%% skip_until([C|_]=Cs, C1, C2) when C =:= C1 ; C =:= C2 -> Cs; +%% skip_until([_|Cs], C1, C2) -> skip_until(Cs, C1, C2); +%% skip_until([], _, _) -> []. 
+ +skip_past([C|Cs], C1, C2) when C =:= C1 ; C =:= C2 -> Cs; +skip_past([_|Cs], C1, C2) -> skip_past(Cs, C1, C2); +skip_past([], _, _) -> []. diff --git a/samples/Java/GrammarKit.java b/samples/Java/GrammarKit.java new file mode 100644 index 00000000..a5db6da8 --- /dev/null +++ b/samples/Java/GrammarKit.java @@ -0,0 +1,625 @@ +// This is a generated file. Not intended for manual editing. +package org.intellij.grammar.parser; + +import com.intellij.lang.PsiBuilder; +import com.intellij.lang.PsiBuilder.Marker; +import static org.intellij.grammar.psi.BnfTypes.*; +import static org.intellij.grammar.parser.GeneratedParserUtilBase.*; +import com.intellij.psi.tree.IElementType; +import com.intellij.lang.ASTNode; +import com.intellij.psi.tree.TokenSet; +import com.intellij.lang.PsiParser; +import com.intellij.lang.LightPsiParser; + +@SuppressWarnings({"SimplifiableIfStatement", "UnusedAssignment"}) +public class GrammarParser implements PsiParser, LightPsiParser { + + public ASTNode parse(IElementType t, PsiBuilder b) { + parseLight(t, b); + return b.getTreeBuilt(); + } + + public void parseLight(IElementType t, PsiBuilder b) { + boolean r; + b = adapt_builder_(t, b, this, EXTENDS_SETS_); + Marker m = enter_section_(b, 0, _COLLAPSE_, null); + if (t == BNF_ATTR) { + r = attr(b, 0); + } + else if (t == BNF_ATTR_PATTERN) { + r = attr_pattern(b, 0); + } + else if (t == BNF_ATTR_VALUE) { + r = attr_value(b, 0); + } + else if (t == BNF_ATTRS) { + r = attrs(b, 0); + } + else if (t == BNF_CHOICE) { + r = choice(b, 0); + } + else if (t == BNF_EXPRESSION) { + r = expression(b, 0); + } + else if (t == BNF_LITERAL_EXPRESSION) { + r = literal_expression(b, 0); + } + else if (t == BNF_MODIFIER) { + r = modifier(b, 0); + } + else if (t == BNF_PAREN_EXPRESSION) { + r = paren_expression(b, 0); + } + else if (t == BNF_PREDICATE) { + r = predicate(b, 0); + } + else if (t == BNF_PREDICATE_SIGN) { + r = predicate_sign(b, 0); + } + else if (t == BNF_QUANTIFIED) { + r = quantified(b, 0); + } + 
else if (t == BNF_QUANTIFIER) { + r = quantifier(b, 0); + } + else if (t == BNF_REFERENCE_OR_TOKEN) { + r = reference_or_token(b, 0); + } + else if (t == BNF_RULE) { + r = rule(b, 0); + } + else if (t == BNF_SEQUENCE) { + r = sequence(b, 0); + } + else if (t == BNF_STRING_LITERAL_EXPRESSION) { + r = string_literal_expression(b, 0); + } + else { + r = parse_root_(t, b, 0); + } + exit_section_(b, 0, m, t, r, true, TRUE_CONDITION); + } + + protected boolean parse_root_(IElementType t, PsiBuilder b, int l) { + return grammar(b, l + 1); + } + + public static final TokenSet[] EXTENDS_SETS_ = new TokenSet[] { + create_token_set_(BNF_LITERAL_EXPRESSION, BNF_STRING_LITERAL_EXPRESSION), + create_token_set_(BNF_CHOICE, BNF_EXPRESSION, BNF_LITERAL_EXPRESSION, BNF_PAREN_EXPRESSION, + BNF_PREDICATE, BNF_QUANTIFIED, BNF_REFERENCE_OR_TOKEN, BNF_SEQUENCE, + BNF_STRING_LITERAL_EXPRESSION), + }; + + /* ********************************************************** */ + // id attr_pattern? '=' attr_value ';'? + public static boolean attr(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr")) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_ID); + p = r; // pin = 1 + r = r && report_error_(b, attr_1(b, l + 1)); + r = p && report_error_(b, consumeToken(b, BNF_OP_EQ)) && r; + r = p && report_error_(b, attr_value(b, l + 1)) && r; + r = p && attr_4(b, l + 1) && r; + exit_section_(b, l, m, BNF_ATTR, r, p, attr_recover_until_parser_); + return r || p; + } + + // attr_pattern? + private static boolean attr_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_1")) return false; + attr_pattern(b, l + 1); + return true; + } + + // ';'? 
+ private static boolean attr_4(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_4")) return false; + consumeToken(b, BNF_SEMICOLON); + return true; + } + + /* ********************************************************** */ + // '(' string ')' + public static boolean attr_pattern(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_pattern")) return false; + if (!nextTokenIs(b, BNF_LEFT_PAREN)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_PAREN); + r = r && consumeToken(b, BNF_STRING); + r = r && consumeToken(b, BNF_RIGHT_PAREN); + exit_section_(b, m, BNF_ATTR_PATTERN, r); + return r; + } + + /* ********************************************************** */ + // !'}' + static boolean attr_recover_until(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_recover_until")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !consumeToken(b, BNF_RIGHT_BRACE); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // (reference_or_token | literal_expression) !'=' + public static boolean attr_value(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = attr_value_0(b, l + 1); + r = r && attr_value_1(b, l + 1); + exit_section_(b, l, m, BNF_ATTR_VALUE, r, false, null); + return r; + } + + // reference_or_token | literal_expression + private static boolean attr_value_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = reference_or_token(b, l + 1); + if (!r) r = literal_expression(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !'=' + private static boolean attr_value_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value_1")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); 
+ r = !consumeToken(b, BNF_OP_EQ); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // '{' attr* '}' + public static boolean attrs(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attrs")) return false; + if (!nextTokenIs(b, BNF_LEFT_BRACE)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_LEFT_BRACE); + p = r; // pin = 1 + r = r && report_error_(b, attrs_1(b, l + 1)); + r = p && consumeToken(b, BNF_RIGHT_BRACE) && r; + exit_section_(b, l, m, BNF_ATTRS, r, p, null); + return r || p; + } + + // attr* + private static boolean attrs_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attrs_1")) return false; + int c = current_position_(b); + while (true) { + if (!attr(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "attrs_1", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // '{' sequence ('|' sequence)* '}' | sequence choice_tail* + public static boolean choice(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = choice_0(b, l + 1); + if (!r) r = choice_1(b, l + 1); + exit_section_(b, l, m, BNF_CHOICE, r, false, null); + return r; + } + + // '{' sequence ('|' sequence)* '}' + private static boolean choice_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_BRACE); + r = r && sequence(b, l + 1); + r = r && choice_0_2(b, l + 1); + r = r && consumeToken(b, BNF_RIGHT_BRACE); + exit_section_(b, m, null, r); + return r; + } + + // ('|' sequence)* + private static boolean choice_0_2(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0_2")) return false; + int c = current_position_(b); + while (true) { + if (!choice_0_2_0(b, l + 
1)) break; + if (!empty_element_parsed_guard_(b, "choice_0_2", c)) break; + c = current_position_(b); + } + return true; + } + + // '|' sequence + private static boolean choice_0_2_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0_2_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_OP_OR); + r = r && sequence(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // sequence choice_tail* + private static boolean choice_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_1")) return false; + boolean r; + Marker m = enter_section_(b); + r = sequence(b, l + 1); + r = r && choice_1_1(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // choice_tail* + private static boolean choice_1_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_1_1")) return false; + int c = current_position_(b); + while (true) { + if (!choice_tail(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "choice_1_1", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // '|' sequence + static boolean choice_tail(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_tail")) return false; + if (!nextTokenIs(b, BNF_OP_OR)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_OP_OR); + p = r; // pin = 1 + r = r && sequence(b, l + 1); + exit_section_(b, l, m, null, r, p, null); + return r || p; + } + + /* ********************************************************** */ + // choice? 
+ public static boolean expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "expression")) return false; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + choice(b, l + 1); + exit_section_(b, l, m, BNF_EXPRESSION, true, false, null); + return true; + } + + /* ********************************************************** */ + // (attrs | rule) * + static boolean grammar(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "grammar")) return false; + int c = current_position_(b); + while (true) { + if (!grammar_0(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "grammar", c)) break; + c = current_position_(b); + } + return true; + } + + // attrs | rule + private static boolean grammar_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "grammar_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = attrs(b, l + 1); + if (!r) r = rule(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + /* ********************************************************** */ + // string_literal_expression | number + public static boolean literal_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "literal_expression")) return false; + if (!nextTokenIs(b, "", BNF_NUMBER, BNF_STRING)) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = string_literal_expression(b, l + 1); + if (!r) r = consumeToken(b, BNF_NUMBER); + exit_section_(b, l, m, BNF_LITERAL_EXPRESSION, r, false, null); + return r; + } + + /* ********************************************************** */ + // 'private' | 'external' | 'wrapped' + public static boolean modifier(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "modifier")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, "private"); + if (!r) r = consumeToken(b, "external"); + if (!r) r = consumeToken(b, "wrapped"); + exit_section_(b, l, m, BNF_MODIFIER, r, false, null); + return r; + } + + /* 
********************************************************** */ + // quantified | predicate + static boolean option(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "option")) return false; + boolean r; + Marker m = enter_section_(b); + r = quantified(b, l + 1); + if (!r) r = predicate(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + /* ********************************************************** */ + // '(' expression ')' + public static boolean paren_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "paren_expression")) return false; + if (!nextTokenIs(b, BNF_LEFT_PAREN)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_LEFT_PAREN); + p = r; // pin = 1 + r = r && report_error_(b, expression(b, l + 1)); + r = p && consumeToken(b, BNF_RIGHT_PAREN) && r; + exit_section_(b, l, m, BNF_PAREN_EXPRESSION, r, p, null); + return r || p; + } + + /* ********************************************************** */ + // predicate_sign simple + public static boolean predicate(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "predicate")) return false; + if (!nextTokenIs(b, "", BNF_OP_NOT, BNF_OP_AND)) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = predicate_sign(b, l + 1); + r = r && simple(b, l + 1); + exit_section_(b, l, m, BNF_PREDICATE, r, false, null); + return r; + } + + /* ********************************************************** */ + // '&' | '!' 
+ public static boolean predicate_sign(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "predicate_sign")) return false; + if (!nextTokenIs(b, "", BNF_OP_NOT, BNF_OP_AND)) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_OP_AND); + if (!r) r = consumeToken(b, BNF_OP_NOT); + exit_section_(b, l, m, BNF_PREDICATE_SIGN, r, false, null); + return r; + } + + /* ********************************************************** */ + // '[' expression ']' | simple quantifier? + public static boolean quantified(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = quantified_0(b, l + 1); + if (!r) r = quantified_1(b, l + 1); + exit_section_(b, l, m, BNF_QUANTIFIED, r, false, null); + return r; + } + + // '[' expression ']' + private static boolean quantified_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_BRACKET); + r = r && expression(b, l + 1); + r = r && consumeToken(b, BNF_RIGHT_BRACKET); + exit_section_(b, m, null, r); + return r; + } + + // simple quantifier? + private static boolean quantified_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_1")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple(b, l + 1); + r = r && quantified_1_1(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // quantifier? + private static boolean quantified_1_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_1_1")) return false; + quantifier(b, l + 1); + return true; + } + + /* ********************************************************** */ + // '?' 
| '+' | '*' + public static boolean quantifier(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantifier")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_OP_OPT); + if (!r) r = consumeToken(b, BNF_OP_ONEMORE); + if (!r) r = consumeToken(b, BNF_OP_ZEROMORE); + exit_section_(b, l, m, BNF_QUANTIFIER, r, false, null); + return r; + } + + /* ********************************************************** */ + // id + public static boolean reference_or_token(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "reference_or_token")) return false; + if (!nextTokenIs(b, BNF_ID)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_ID); + exit_section_(b, m, BNF_REFERENCE_OR_TOKEN, r); + return r; + } + + /* ********************************************************** */ + // modifier* id '::=' expression attrs? ';'? + public static boolean rule(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule")) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, ""); + r = rule_0(b, l + 1); + r = r && consumeToken(b, BNF_ID); + r = r && consumeToken(b, BNF_OP_IS); + p = r; // pin = 3 + r = r && report_error_(b, expression(b, l + 1)); + r = p && report_error_(b, rule_4(b, l + 1)) && r; + r = p && rule_5(b, l + 1) && r; + exit_section_(b, l, m, BNF_RULE, r, p, rule_recover_until_parser_); + return r || p; + } + + // modifier* + private static boolean rule_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_0")) return false; + int c = current_position_(b); + while (true) { + if (!modifier(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "rule_0", c)) break; + c = current_position_(b); + } + return true; + } + + // attrs? + private static boolean rule_4(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_4")) return false; + attrs(b, l + 1); + return true; + } + + // ';'? 
+ private static boolean rule_5(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_5")) return false; + consumeToken(b, BNF_SEMICOLON); + return true; + } + + /* ********************************************************** */ + // !'{' + static boolean rule_recover_until(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_recover_until")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !consumeToken(b, BNF_LEFT_BRACE); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // option + + public static boolean sequence(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "sequence")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = option(b, l + 1); + int c = current_position_(b); + while (r) { + if (!option(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "sequence", c)) break; + c = current_position_(b); + } + exit_section_(b, l, m, BNF_SEQUENCE, r, false, null); + return r; + } + + /* ********************************************************** */ + // !(modifier* id '::=' ) reference_or_token | literal_expression | paren_expression + static boolean simple(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0(b, l + 1); + if (!r) r = literal_expression(b, l + 1); + if (!r) r = paren_expression(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !(modifier* id '::=' ) reference_or_token + private static boolean simple_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0_0(b, l + 1); + r = r && reference_or_token(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !(modifier* id '::=' ) + private static boolean simple_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0")) return false; + 
boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !simple_0_0_0(b, l + 1); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + // modifier* id '::=' + private static boolean simple_0_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0_0_0_0(b, l + 1); + r = r && consumeToken(b, BNF_ID); + r = r && consumeToken(b, BNF_OP_IS); + exit_section_(b, m, null, r); + return r; + } + + // modifier* + private static boolean simple_0_0_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0_0_0")) return false; + int c = current_position_(b); + while (true) { + if (!modifier(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "simple_0_0_0_0", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // string + public static boolean string_literal_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "string_literal_expression")) return false; + if (!nextTokenIs(b, BNF_STRING)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_STRING); + exit_section_(b, m, BNF_STRING_LITERAL_EXPRESSION, r); + return r; + } + + final static Parser attr_recover_until_parser_ = new Parser() { + public boolean parse(PsiBuilder b, int l) { + return attr_recover_until(b, l + 1); + } + }; + final static Parser rule_recover_until_parser_ = new Parser() { + public boolean parse(PsiBuilder b, int l) { + return rule_recover_until(b, l + 1); + } + }; +} diff --git a/samples/Java/JFlexLexer.java b/samples/Java/JFlexLexer.java new file mode 100644 index 00000000..e54564fb --- /dev/null +++ b/samples/Java/JFlexLexer.java @@ -0,0 +1,482 @@ +/* The following code was generated by JFlex 1.4.3 on 28/01/16 11:27 */ + +package test; +import com.intellij.lexer.*; +import com.intellij.psi.tree.IElementType; +import static 
org.intellij.grammar.psi.BnfTypes.*; + + +/** + * This class is a scanner generated by + * JFlex 1.4.3 + * on 28/01/16 11:27 from the specification file + * /home/abigail/code/intellij-grammar-kit-test/src/test/_GrammarLexer.flex + */ +public class _GrammarLexer implements FlexLexer { + /** initial size of the lookahead buffer */ + private static final int ZZ_BUFFERSIZE = 16384; + + /** lexical states */ + public static final int YYINITIAL = 0; + + /** + * ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l + * ZZ_LEXSTATE[l+1] is the state in the DFA for the lexical state l + * at the beginning of a line + * l is of the form l = 2*k, k a non negative integer + */ + private static final int ZZ_LEXSTATE[] = { + 0, 0 + }; + + /** + * Translates characters to character classes + */ + private static final String ZZ_CMAP_PACKED = + "\11\0\1\1\1\1\1\0\1\1\1\1\22\0\1\1\101\0\1\13"+ + "\1\0\1\3\1\14\1\0\1\10\1\0\1\2\3\0\1\12\1\7"+ + "\3\0\1\6\1\4\1\5\1\11\uff8a\0"; + + /** + * Translates characters to character classes + */ + private static final char [] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED); + + /** + * Translates DFA states to action switch labels. 
+ */ + private static final int [] ZZ_ACTION = zzUnpackAction(); + + private static final String ZZ_ACTION_PACKED_0 = + "\1\0\1\1\1\2\3\1\1\3\10\0\1\4\1\5"; + + private static int [] zzUnpackAction() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackAction(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + do result[j++] = value; while (--count > 0); + } + return j; + } + + + /** + * Translates a state to a row index in the transition table + */ + private static final int [] ZZ_ROWMAP = zzUnpackRowMap(); + + private static final String ZZ_ROWMAP_PACKED_0 = + "\0\0\0\15\0\32\0\47\0\64\0\101\0\15\0\116"+ + "\0\133\0\150\0\165\0\202\0\217\0\234\0\251\0\15"+ + "\0\15"; + + private static int [] zzUnpackRowMap() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackRowMap(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int high = packed.charAt(i++) << 16; + result[j++] = high | packed.charAt(i++); + } + return j; + } + + /** + * The transition table of the DFA + */ + private static final int [] ZZ_TRANS = zzUnpackTrans(); + + private static final String ZZ_TRANS_PACKED_0 = + "\1\2\1\3\1\4\1\2\1\5\2\2\1\6\5\2"+ + "\16\0\1\3\16\0\1\7\16\0\1\10\20\0\1\11"+ + "\11\0\1\12\20\0\1\13\4\0\1\14\25\0\1\15"+ + "\10\0\1\16\21\0\1\17\10\0\1\20\12\0\1\21"+ + "\6\0"; + + private static int [] zzUnpackTrans() { + int [] result = new int[182]; + int offset = 0; + offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result); + return result; 
+ } + + private static int zzUnpackTrans(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + value--; + do result[j++] = value; while (--count > 0); + } + return j; + } + + + /* error codes */ + private static final int ZZ_UNKNOWN_ERROR = 0; + private static final int ZZ_NO_MATCH = 1; + private static final int ZZ_PUSHBACK_2BIG = 2; + private static final char[] EMPTY_BUFFER = new char[0]; + private static final int YYEOF = -1; + private static java.io.Reader zzReader = null; // Fake + + /* error messages for the codes above */ + private static final String ZZ_ERROR_MSG[] = { + "Unkown internal scanner error", + "Error: could not match input", + "Error: pushback value was too large" + }; + + /** + * ZZ_ATTRIBUTE[aState] contains the attributes of state aState + */ + private static final int [] ZZ_ATTRIBUTE = zzUnpackAttribute(); + + private static final String ZZ_ATTRIBUTE_PACKED_0 = + "\1\0\1\11\4\1\1\11\10\0\2\11"; + + private static int [] zzUnpackAttribute() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackAttribute(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + do result[j++] = value; while (--count > 0); + } + return j; + } + + /** the current state of the DFA */ + private int zzState; + + /** the current lexical state */ + private int zzLexicalState = YYINITIAL; + + /** this buffer contains the current text to be matched and is + the source of the yytext() string */ + private CharSequence zzBuffer = ""; + + /** this buffer may contains the 
current text array to be matched when it is cheap to acquire it */ + private char[] zzBufferArray; + + /** the textposition at the last accepting state */ + private int zzMarkedPos; + + /** the textposition at the last state to be included in yytext */ + private int zzPushbackPos; + + /** the current text position in the buffer */ + private int zzCurrentPos; + + /** startRead marks the beginning of the yytext() string in the buffer */ + private int zzStartRead; + + /** endRead marks the last character in the buffer, that has been read + from input */ + private int zzEndRead; + + /** + * zzAtBOL == true <=> the scanner is currently at the beginning of a line + */ + private boolean zzAtBOL = true; + + /** zzAtEOF == true <=> the scanner is at the EOF */ + private boolean zzAtEOF; + + /* user code: */ + public _GrammarLexer() { + this((java.io.Reader)null); + } + + + /** + * Creates a new scanner + * + * @param in the java.io.Reader to read input from. + */ + public _GrammarLexer(java.io.Reader in) { + this.zzReader = in; + } + + + /** + * Unpacks the compressed character translation table. 
+ * + * @param packed the packed character translation table + * @return the unpacked character translation table + */ + private static char [] zzUnpackCMap(String packed) { + char [] map = new char[0x10000]; + int i = 0; /* index in packed string */ + int j = 0; /* index in unpacked array */ + while (i < 52) { + int count = packed.charAt(i++); + char value = packed.charAt(i++); + do map[j++] = value; while (--count > 0); + } + return map; + } + + public final int getTokenStart(){ + return zzStartRead; + } + + public final int getTokenEnd(){ + return getTokenStart() + yylength(); + } + + public void reset(CharSequence buffer, int start, int end,int initialState){ + zzBuffer = buffer; + zzBufferArray = com.intellij.util.text.CharArrayUtil.fromSequenceWithoutCopying(buffer); + zzCurrentPos = zzMarkedPos = zzStartRead = start; + zzPushbackPos = 0; + zzAtEOF = false; + zzAtBOL = true; + zzEndRead = end; + yybegin(initialState); + } + + /** + * Refills the input buffer. + * + * @return false, iff there was new input. + * + * @exception java.io.IOException if any I/O-Error occurs + */ + private boolean zzRefill() throws java.io.IOException { + return true; + } + + + /** + * Returns the current lexical state. + */ + public final int yystate() { + return zzLexicalState; + } + + + /** + * Enters a new lexical state + * + * @param newState the new lexical state + */ + public final void yybegin(int newState) { + zzLexicalState = newState; + } + + + /** + * Returns the text matched by the current regular expression. + */ + public final CharSequence yytext() { + return zzBuffer.subSequence(zzStartRead, zzMarkedPos); + } + + + /** + * Returns the character at position pos from the + * matched text. + * + * It is equivalent to yytext().charAt(pos), but faster + * + * @param pos the position of the character to fetch. + * A value from 0 to yylength()-1. + * + * @return the character at position pos + */ + public final char yycharat(int pos) { + return zzBufferArray != null ? 
zzBufferArray[zzStartRead+pos]:zzBuffer.charAt(zzStartRead+pos); + } + + + /** + * Returns the length of the matched text region. + */ + public final int yylength() { + return zzMarkedPos-zzStartRead; + } + + + /** + * Reports an error that occured while scanning. + * + * In a wellformed scanner (no or only correct usage of + * yypushback(int) and a match-all fallback rule) this method + * will only be called with things that "Can't Possibly Happen". + * If this method is called, something is seriously wrong + * (e.g. a JFlex bug producing a faulty scanner etc.). + * + * Usual syntax/scanner level error handling should be done + * in error fallback rules. + * + * @param errorCode the code of the errormessage to display + */ + private void zzScanError(int errorCode) { + String message; + try { + message = ZZ_ERROR_MSG[errorCode]; + } + catch (ArrayIndexOutOfBoundsException e) { + message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR]; + } + + throw new Error(message); + } + + + /** + * Pushes the specified amount of characters back into the input stream. + * + * They will be read again by then next call of the scanning method + * + * @param number the number of characters to be read again. + * This number must not be greater than yylength()! + */ + public void yypushback(int number) { + if ( number > yylength() ) + zzScanError(ZZ_PUSHBACK_2BIG); + + zzMarkedPos -= number; + } + + + /** + * Resumes scanning until the next regular expression is matched, + * the end of input is encountered or an I/O-Error occurs. 
+ * + * @return the next token + * @exception java.io.IOException if any I/O-Error occurs + */ + public IElementType advance() throws java.io.IOException { + int zzInput; + int zzAction; + + // cached fields: + int zzCurrentPosL; + int zzMarkedPosL; + int zzEndReadL = zzEndRead; + CharSequence zzBufferL = zzBuffer; + char[] zzBufferArrayL = zzBufferArray; + char [] zzCMapL = ZZ_CMAP; + + int [] zzTransL = ZZ_TRANS; + int [] zzRowMapL = ZZ_ROWMAP; + int [] zzAttrL = ZZ_ATTRIBUTE; + + while (true) { + zzMarkedPosL = zzMarkedPos; + + zzAction = -1; + + zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL; + + zzState = ZZ_LEXSTATE[zzLexicalState]; + + + zzForAction: { + while (true) { + + if (zzCurrentPosL < zzEndReadL) + zzInput = (zzBufferArrayL != null ? zzBufferArrayL[zzCurrentPosL++] : zzBufferL.charAt(zzCurrentPosL++)); + else if (zzAtEOF) { + zzInput = YYEOF; + break zzForAction; + } + else { + // store back cached positions + zzCurrentPos = zzCurrentPosL; + zzMarkedPos = zzMarkedPosL; + boolean eof = zzRefill(); + // get translated positions and possibly new buffer + zzCurrentPosL = zzCurrentPos; + zzMarkedPosL = zzMarkedPos; + zzBufferL = zzBuffer; + zzEndReadL = zzEndRead; + if (eof) { + zzInput = YYEOF; + break zzForAction; + } + else { + zzInput = (zzBufferArrayL != null ? zzBufferArrayL[zzCurrentPosL++] : zzBufferL.charAt(zzCurrentPosL++)); + } + } + int zzNext = zzTransL[ zzRowMapL[zzState] + zzCMapL[zzInput] ]; + if (zzNext == -1) break zzForAction; + zzState = zzNext; + + int zzAttributes = zzAttrL[zzState]; + if ( (zzAttributes & 1) == 1 ) { + zzAction = zzState; + zzMarkedPosL = zzCurrentPosL; + if ( (zzAttributes & 8) == 8 ) break zzForAction; + } + + } + } + + // store back cached position + zzMarkedPos = zzMarkedPosL; + + switch (zzAction < 0 ? 
zzAction : ZZ_ACTION[zzAction]) { + case 1: + { return com.intellij.psi.TokenType.BAD_CHARACTER; + } + case 6: break; + case 4: + { return BNF_STRING; + } + case 7: break; + case 5: + { return BNF_NUMBER; + } + case 8: break; + case 3: + { return BNF_ID; + } + case 9: break; + case 2: + { return com.intellij.psi.TokenType.WHITE_SPACE; + } + case 10: break; + default: + if (zzInput == YYEOF && zzStartRead == zzCurrentPos) { + zzAtEOF = true; + return null; + } + else { + zzScanError(ZZ_NO_MATCH); + } + } + } + } + + +} diff --git a/samples/Perl/Sample.pod b/samples/Perl/Sample.pod new file mode 100644 index 00000000..00fa0b99 --- /dev/null +++ b/samples/Perl/Sample.pod @@ -0,0 +1,10 @@ +use strict; +use warnings; +package DZT::Sample; + +sub return_arrayref_of_values_passed { + my $invocant = shift; + return \@_; +} + +1; diff --git a/samples/Perl/PSGI.pod b/samples/Pod/PSGI.pod similarity index 100% rename from samples/Perl/PSGI.pod rename to samples/Pod/PSGI.pod diff --git a/samples/Prolog/queues.yap b/samples/Prolog/queues.yap new file mode 100644 index 00000000..b464152e --- /dev/null +++ b/samples/Prolog/queues.yap @@ -0,0 +1,280 @@ +% This file has been included as an YAP library by Vitor Santos Costa, 1999 + +% File : QUEUES.PL +% Author : R.A.O'Keefe +% Updated: Friday November 18th, 1983, 8:09:31 pm +% Purpose: define queue operations +% Needs : lib(lists) for append/3. + +/** @defgroup Queues Queues +@ingroup library +@{ + +The following queue manipulation routines are available once +included with the `use_module(library(queues))` command. Queues are +implemented with difference lists. + +*/ + +/** + + @pred make_queue(+ _Queue_) + + +Creates a new empty queue. It should only be used to create a new queue. + + +*/ + + +/** @pred empty_queue(+ _Queue_) + + +Tests whether the queue is empty. + + +*/ +/** @pred head_queue(+ _Queue_, ? _Head_) + + +Unifies Head with the first element of the queue. 
+ + +*/ +/** @pred join_queue(+ _Element_, + _OldQueue_, - _NewQueue_) + + +Adds the new element at the end of the queue. + + +*/ +/** @pred jump_queue(+ _Element_, + _OldQueue_, - _NewQueue_) + + +Adds the new element at the front of the list. + + +*/ +/** @pred length_queue(+ _Queue_, - _Length_) + + +Counts the number of elements currently in the queue. + + +*/ +/** @pred list_join_queue(+ _List_, + _OldQueue_, - _NewQueue_) + + +Ads the new elements at the end of the queue. + + +*/ +/** @pred list_jump_queue(+ _List_, + _OldQueue_, + _NewQueue_) + + +Adds all the elements of _List_ at the front of the queue. + + +*/ +/** @pred list_to_queue(+ _List_, - _Queue_) + + +Creates a new queue with the same elements as _List._ + + +*/ +/** @pred queue_to_list(+ _Queue_, - _List_) + + +Creates a new list with the same elements as _Queue_. + + + + + */ +/** @pred serve_queue(+ _OldQueue_, + _Head_, - _NewQueue_) + + +Removes the first element of the queue for service. + + +*/ +:- module(queues, [ + make_queue/1, % create empty queue + join_queue/3, % add element to end of queue + list_join_queue/3, % add many elements to end of queue + jump_queue/3, % add element to front of queue + list_jump_queue/3, % add many elements to front of queue + head_queue/2, % look at first element of queue + serve_queue/3, % remove first element of queue + length_queue/2, % count elements of queue + empty_queue/1, % test whether queue is empty + list_to_queue/2, % convert list to queue + queue_to_list/2 % convert queue to list + ]). + +:- use_module(library(lists), [append/3]). + +/* +:- mode + make_queue(-), + join_queue(+, +, -), + list_join_queue(+, +, -), + jump_queue(+, +, -), + list_jump_queue(+, +, -), + head_queue(+, ?), + serve_queue(+, ?, -), + length_queue(+, ?), + length_queue(+, +, +, -), + empty_queue(+), + list_to_queue(+, -), + queue_to_list(+, -), + queue_to_list(+, +, -). 
+*/ + +/* In this package, a queue is represented as a term Front-Back, where + Front is a list and Back is a tail of that list, and is normally a + variable. join_queue will only work when the Back is a variable, + the other routines will accept any tail. The elements of the queue + are the list difference, that is, all the elements starting at Front + and stopping at Back. Examples: + + [a,b,c,d,e|Z]-Z has elements a,b,c,d,e + [a,b,c,d,e]-[d,e] has elements a,b,c + Z-Z has no elements + [1,2,3]-[1,2,3] has no elements +*/ + +% make_queue(Queue) +% creates a new empty queue. It will also match empty queues, but +% because Prolog doesn't do the occurs check, it will also match +% other queues, creating circular lists. So this should ONLY be +% used to make new queues. + +make_queue(X-X). + + + +% join_queue(Element, OldQueue, NewQueue) +% adds the new element at the end of the queue. The old queue is +% side-effected, so you *can't* do +% join_queue(1, OldQ, NewQ1), +% join_queue(2, OldQ, NewQ2). +% There isn't any easy way of doing that, sensible though it might +% be. You *can* do +% join_queue(1, OldQ, MidQ), +% join_queue(2, MidQ, NewQ). +% See list_join_queue. + +join_queue(Element, Front-[Element|Back], Front-Back). + + + +% list_join_queue(List, OldQueue, NewQueue) +% adds the new elements at the end of the queue. The elements are +% added in the same order that they appear in the list, e.g. +% list_join_queue([y,z], [a,b,c|M]-M, [a,b,c,y,z|N]-N). + +list_join_queue(List, Front-OldBack, Front-NewBack) :- + append(List, OldBack, NewBack). + + + +% jump_queue(Element, OldQueue, NewQueue) +% adds the new element at the front of the list. Unlike join_queue, +% jump_queue(1, OldQ, NewQ1), +% jump_queue(2, OldQ, NewQ2) +% *does* work, though if you add things at the end of NewQ1 they +% will also show up in NewQ2. Note that +% jump_queue(1, OldQ, MidQ), +% jump_queue(2, MidQ, NewQ) +% makes NewQ start 2, 1, ... 
+ +jump_queue(Element, Front-Back, [Element|Front]-Back). + + + +% list_jump_queue(List, OldQueue, NewQueue) +% adds all the elements of List at the front of the queue. There are +% two ways we might do this. We could add all the elements one at a +% time, so that they would appear at the beginning of the queue in the +% opposite order to the order they had in the list, or we could add +% them in one lump, so that they have the same order in the queue as +% in the list. As you can easily add the elements one at a time if +% that is what you want, I have chosen the latter. + +list_jump_queue(List, OldFront-Back, NewFront-Back) :- + append(List, OldFront, NewFront). +% reverse(List, OldFront, NewFront). % for the other definition + + + +% head_queue(Queue, Head) +% unifies Head with the first element of the queue. The tricky part +% is that we might be at the end of a queue: Back-Back, with Back a +% variable, and in that case this predicate should not succeed, as we +% don't know what that element is or whether it exists yet. + +head_queue(Front-Back, Head) :- + Front \== Back, % the queue is not empty + Front = [Head|_]. + + + +% serve_queue(OldQueue, Head, NewQueue) +% removes the first element of the queue for service. + +serve_queue(OldFront-Back, Head, NewFront-Back) :- + OldFront \== Back, + OldFront = [Head|NewFront]. + + + +% empty_queue(Queue) +% tests whether the queue is empty. If the back of a queue were +% guaranteed to be a variable, we could have +% empty_queue(Front-Back) :- var(Front). +% but I don't see why you shouldn't be able to treat difference +% lists as queues if you want to. + +empty_queue(Front-Back) :- + Front == Back. + + + +% length_queue(Queue, Length) +% counts the number of elements currently in the queue. Note that +% we have to be careful in checking for the end of the list, we +% can't test for [] the way length(List) does. + +length_queue(Front-Back, Length) :- + length_queue(Front, Back, 0, N), + Length = N. 
+ +length_queue(Front, Back, N, N) :- + Front == Back, !. +length_queue([_|Front], Back, K, N) :- + L is K+1, + length_queue(Front, Back, L, N). + + + +% list_to_queue(List, Queue) +% creates a new queue with the same elements as List. + +list_to_queue(List, Front-Back) :- + append(List, Back, Front). + + + +% queue_to_list(Queue, List) +% creates a new list with the same elements as Queue. + +queue_to_list(Front-Back, List) :- + queue_to_list(Front, Back, List). + +queue_to_list(Front, Back, Ans) :- + Front == Back, !, Ans = []. +queue_to_list([Head|Front], Back, [Head|Tail]) :- + queue_to_list(Front, Back, Tail). + diff --git a/samples/Python/closure_js_binary.bzl b/samples/Python/closure_js_binary.bzl new file mode 100644 index 00000000..c4453ffb --- /dev/null +++ b/samples/Python/closure_js_binary.bzl @@ -0,0 +1,124 @@ +# Copyright 2015 The Bazel Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Build definitions for JavaScript binaries compiled with the Closure Compiler. + +A single file is produced with the _compiled.js suffix. + +By default, the name of the entry point is assumed to be the same as that of the +build target. This behaviour may be overridden with the "main" attribute. + +The optimization level may be set with the "compilation_level" attribute. +Supported values are: unobfuscated, simple, and advanced. 
+ +Example: + + closure_js_binary( + name = "hello", + compilation_level = "simple", + language_in = "ecmascript6", + language_out = "ecmascript3", + externs = ["//third_party/javascript/google_cast/cast.js"], + deps = [ + "@closure_library//:closure_library", + ":hello_lib", + ], + ) + +This rule will produce hello_combined.js. +""" + +_COMPILATION_LEVELS = { + "whitespace_only": [ + "--compilation_level=WHITESPACE_ONLY", + "--formatting=PRETTY_PRINT" + ], + "simple": ["--compilation_level=SIMPLE"], + "advanced": ["--compilation_level=ADVANCED"] +} + +_SUPPORTED_LANGUAGES = { + "es3": ["ES3"], + "ecmascript3": ["ECMASCRIPT3"], + "es5": ["ES5"], + "ecmascript5": ["ECMASCRIPT5"], + "es5_strict": ["ES5_STRICT"], + "ecmascript5_strict": ["ECMASCRIPT5_STRICT"], + "es6": ["ES6"], + "ecmascript6": ["ECMASCRIPT6"], + "es6_strict": ["ES6_STRICT"], + "ecmascript6_strict": ["ECMASCRIPT6_STRICT"], + "es6_typed": ["ES6_TYPED"], + "ecmascript6_typed": ["ECMASCRIPT6_TYPED"], +} + +def _impl(ctx): + externs = set(order="compile") + srcs = set(order="compile") + for dep in ctx.attr.deps: + externs += dep.transitive_js_externs + srcs += dep.transitive_js_srcs + + args = [ + "--entry_point=goog:%s" % ctx.attr.main, + "--js_output_file=%s" % ctx.outputs.out.path, + "--dependency_mode=LOOSE", + "--warning_level=VERBOSE", + ] + (["--js=%s" % src.path for src in srcs] + + ["--externs=%s" % extern.path for extern in externs]) + + # Set the compilation level. + if ctx.attr.compilation_level in _COMPILATION_LEVELS: + args += _COMPILATION_LEVELS[ctx.attr.compilation_level] + else: + fail("Invalid compilation_level '%s', expected one of %s" % + (ctx.attr.compilation_level, _COMPILATION_LEVELS.keys())) + + # Set the language in. 
+ if ctx.attr.language_in in _SUPPORTED_LANGUAGES: + args += "--language_in=" + _SUPPORTED_LANGUAGES[ctx.attr.language_in] + else: + fail("Invalid language_in '%s', expected one of %s" % + (ctx.attr.language_in, _SUPPORTED_LANGUAGES.keys())) + + # Set the language out. + if ctx.attr.language_out in _SUPPORTED_LANGUAGES: + args += "--language_out=" + _SUPPORTED_LANGUAGES[ctx.attr.language_out] + else: + fail("Invalid language_out '%s', expected one of %s" % + (ctx.attr.language_out, _SUPPORTED_LANGUAGES.keys())) + + ctx.action( + inputs=list(srcs) + list(externs), + outputs=[ctx.outputs.out], + arguments=args, + executable=ctx.executable._closure_compiler) + + return struct(files=set([ctx.outputs.out])) + +closure_js_binary = rule( + implementation=_impl, + attrs={ + "deps": attr.label_list( + allow_files=False, + providers=["transitive_js_externs", "transitive_js_srcs"]), + "main": attr.string(default="%{name}"), + "compilation_level": attr.string(default="advanced"), + "language_in": attr.string(default="ecmascript6"), + "language_out": attr.string(default="ecmascript3"), + "_closure_compiler": attr.label( + default=Label("//external:closure_compiler_"), + executable=True), + }, + outputs={"out": "%{name}_combined.js"}) diff --git a/samples/Uno/PlayerPads.uno b/samples/Uno/PlayerPads.uno new file mode 100644 index 00000000..0d71a25e --- /dev/null +++ b/samples/Uno/PlayerPads.uno @@ -0,0 +1,154 @@ +using Uno; +using Uno.Collections; +using Uno.Graphics; +using Uno.Scenes; +using Uno.Designer; +using Uno.Content; +using Uno.Content.Models; +using Uno.UI; + +namespace PONG2D +{ + public class PlayerPads : Node + { + + Image _player1Image; + Image _player2Image; + + [Inline] + public Image Player1 + { + get { return _player1Image; } + set + { + if (_player1Image != value) + { + _player1Image = value; + + } + } + } + + [Inline] + public Image Player2 + { + get { return _player2Image; } + set + { + if (_player2Image != value) + { + _player2Image = value; + + } + } + } + 
+ [Hide] + public float2 Player1Pos + { + get { return (Player1.ActualPosition); } + set + { + if (Player1 != null) + Player1.Position = value; + + } + } + + [Hide] + public float2 Player2Pos + { + get { return (Player2.ActualPosition); } + set + { + if (Player2 != null) + Player2.Position = value; + + } + } + + public Rect Player1Rect + { + get { return new Rect(Player1Pos, float2(Player1.Width, Player2.Height)); } + set + { + Player1Pos = value.Position; + if (Player1 != null) + { + Player1.Width = value.Size.X; + Player1.Height = value.Size.Y; + } + } + } + + public Rect Player2Rect + { + get { return new Rect(Player2Pos, float2(Player2.Width, Player2.Height)); } + set + { + Player2Pos = value.Position; + if (Player2 != null) + { + Player2.Width = value.Size.X; + Player2.Height = value.Size.Y; + } + } + } + + public Ball Ball + { + get; + set; + } + + public float PadVelocity { get; set; } + + public PlayerPads() + { + + } + + void UpdatePositions() + { + + } + + protected override void OnUpdate() + { + base.OnUpdate(); + + if (Input.IsKeyDown(Uno.Platform.Key.W)) + { + Player1Pos = float2(0, Player1Pos.Y - PadVelocity); + } + + if (Input.IsKeyDown(Uno.Platform.Key.S)) + { + Player1Pos = float2(0, Player1Pos.Y + PadVelocity); + } + + if (Input.IsKeyDown(Uno.Platform.Key.Up)) + { + Player2Pos = float2(0, Player2Pos.Y - PadVelocity); + } + + if (Input.IsKeyDown(Uno.Platform.Key.Down)) + { + Player2Pos = float2(0, Player2Pos.Y + PadVelocity); + } + + if (Ball != null) + { + + if (Ball.BallRectangle.Intersects(Player1Rect) || + Ball.BallRectangle.Intersects(Player2Rect)) + { + + Ball.BallVelocity = float2(Ball.BallVelocity.X * -1f, Ball.BallVelocity.Y); + } + } + + } + + } +} \ No newline at end of file diff --git a/samples/Uno/Pong.uno b/samples/Uno/Pong.uno new file mode 100644 index 00000000..4e578250 --- /dev/null +++ b/samples/Uno/Pong.uno @@ -0,0 +1,139 @@ +using Uno; +using Uno.Collections; +using Uno.Graphics; +using Uno.Scenes; +using Uno.Content; +using 
Uno.Content.Models; + +namespace PONG2D +{ + public class Pong : Node + { + float2 _player1Pos; + float2 _player2Pos; + float2 ballPosition; + float2 ballVelocity; + float2 rectangleSize; + + Rect player1Rect; + Rect player2Rect; + Rect ballRect; + + float2 resolution = Context.VirtualResolution; + + Random random = new Random(1); + + + float2 Player1Pos + { + get { return _player1Pos; } + set + { + _player1Pos = Math.Clamp(value, float2(0, 0), resolution - rectangleSize); + } + } + + float2 Player2Pos + { + get { return _player2Pos; } + set + { + _player2Pos = Math.Clamp(value, float2(0, 0), resolution - rectangleSize); + } + } + + public Pong() + { + Uno.Scenes.Input.AddGlobalListener(this); + } + + protected override void OnInitialize() + { + base.OnInitialize(); + UpdateValues(); + + } + + void UpdateValues() + { + rectangleSize = float2(resolution.X / 80f, resolution.Y / 5f); + _player1Pos = float2(0f); + _player2Pos = float2(Context.VirtualResolution.X - rectangleSize.X, 0f); + + player1Rect = new Rect(_player1Pos, rectangleSize); + player2Rect = new Rect(_player2Pos, rectangleSize); + + ballPosition = float2(resolution.X * 0.5f - 10f, resolution.Y * 0.5f - 10f); + ballRect = new Rect(ballPosition, float2(20f)); + + + SpwanBall(); + + } + + void SpwanBall() + { + ballRect.Position = float2(resolution.X * 0.5f - 10f, resolution.Y * 0.5f - 10f); + ballVelocity = float2(5f, 10f) * 0.5f; + } + + void OnWindowResize(object sender, EventArgs args) + { + //UpdateValues(); + } + + protected override void OnUpdate() + { + base.OnUpdate(); + + var padVelocity = resolution.Y * (float)Application.Current.FrameInterval * 4f; + if (Input.IsKeyDown(Uno.Platform.Key.Up)) + { + Player1Pos = float2(Player1Pos.X, Player1Pos.Y - padVelocity); + } + + if (Input.IsKeyDown(Uno.Platform.Key.Down)) + { + Player1Pos = float2(Player1Pos.X, Player1Pos.Y + padVelocity); + } + + if (Input.IsKeyDown(Uno.Platform.Key.W)) + { + Player2Pos = float2(Player2Pos.X, Player2Pos.Y - padVelocity); + 
} + + if (Input.IsKeyDown(Uno.Platform.Key.S)) + { + Player2Pos = float2(Player2Pos.X, Player2Pos.Y + padVelocity); + } + player1Rect.Position = Player1Pos; + player2Rect.Position = Player2Pos; + + if (ballRect.Position.X > resolution.X || ballRect.Position.X < 0) + { + SpwanBall(); + } + if (ballRect.Position.Y > resolution.Y || + ballRect.Position.Y < 0) + { + ballVelocity.Y *= -1f; + } + + if (ballRect.Intersects(player1Rect) || + ballRect.Intersects(player2Rect)) + { + ballVelocity.X *= -1f; + } + + ballRect.Position += ballVelocity; + + } + + protected override void OnDraw() + { + Uno.Drawing.RoundedRectangle.Draw(player1Rect.Position, player1Rect.Size, float4(1f), 0); + Uno.Drawing.RoundedRectangle.Draw(player2Rect.Position, player2Rect.Size, float4(1f), 0); + Uno.Drawing.RoundedRectangle.Draw(ballRect.Position, ballRect.Size, float4(1f), 0f); + } + } +} \ No newline at end of file diff --git a/samples/Uno/TowerBlock.uno b/samples/Uno/TowerBlock.uno new file mode 100644 index 00000000..a25d6849 --- /dev/null +++ b/samples/Uno/TowerBlock.uno @@ -0,0 +1,136 @@ +using Uno; +using Uno.Collections; +using Uno.Graphics; +using Uno.Scenes; +using Uno.Content; +using Uno.Content.Models; +using Uno.Physics.Box2D; + +using TowerBuilder.Box2DMath; + +namespace TowerBuilder +{ + public class TowerBlock : TestBed + { + Body floorBody, deleteBody, mouseBody; + + private List bodies = new List(); + private List bodiesToDelete = new List(); + + private ContactListener contactListener; + + protected override void OnInitializeTestBed() + { + World.Gravity = float2(0, -25.0f); + World.ContactListener = contactListener = new ContactListener(this); + + bodies.Clear(); + bodiesToDelete.Clear(); + + CreateFloor(); + CreateDeleteBody(); + CreateBox2(); + } + + void CreateFloor() + { + var bodyDef = new BodyDef(); + bodyDef.position = float2(0, -40.0f); + + floorBody = World.CreateBody(bodyDef); + + var shape = new PolygonShape(); + shape.SetAsBox(30.0f, 10.0f); + + var fixtureDef = 
new FixtureDef(); + fixtureDef.shape = shape; + fixtureDef.density = 1.0f; + + floorBody.CreateFixture(fixtureDef); + } + + void CreateDeleteBody() + { + var bodyDef = new BodyDef(); + bodyDef.position = float2(0, -44.0f); + + deleteBody = World.CreateBody(bodyDef); + + var shape = new PolygonShape(); + shape.SetAsBox(200.0f, 10.0f); + + var fixtureDef = new FixtureDef(); + fixtureDef.shape = shape; + fixtureDef.density = 1.0f; + + deleteBody.CreateFixture(fixtureDef); + } + + Random random = new Random((int) (Uno.Diagnostics.Clock.GetSeconds() * 1000000)); + void CreateBox2() + { + var bodyDef = new BodyDef(); + bodyDef.type = BodyType.Dynamic; + bodyDef.position = float2(random.NextFloat(-25f, 25f), 50.0f); + bodyDef.angularVelocity = random.NextFloat() * 40 - 20; + bodyDef.userData = float3(0, 0, 0); + + var body = World.CreateBody(bodyDef); + + var shape = new PolygonShape(); + shape.SetAsBox(0.75f, 0.75f); + + var fixtureDef = new FixtureDef(); + fixtureDef.shape = shape; + fixtureDef.density = 5.0f; + //fixtureDef.friction = 0.75f; + + body.CreateFixture(fixtureDef); + + bodies.Add(body); + } + + private int c = 0; + protected override void OnFixedUpdate() + { + base.OnFixedUpdate(); + + debug_log bodies.Count; + if(c++ % 8 == 0 && bodies.Count < 20) CreateBox2(); + + foreach(var body in bodiesToDelete) + { + World.DestroyBody(body); + bodies.Remove(body); + } + + bodiesToDelete.Clear(); + } + + public class ContactListener : IContactListener + { + private TowerBlock b; + public ContactListener(TowerBlock b) + { + this.b = b; + } + + public void BeginContact(Contact contact) + { + if(contact.GetFixtureA().GetBody() == b.deleteBody) + { + b.bodiesToDelete.Add(contact.GetFixtureB().GetBody()); + } + else if(contact.GetFixtureB().GetBody() == b.deleteBody) + { + b.bodiesToDelete.Add(contact.GetFixtureA().GetBody()); + } + } + + public void EndContact(Contact contact) {} + public void PreSolve(Contact contact, ref Manifold manifold) {} + public void 
PostSolve(Contact contact, ref ContactImpulse impulse) {} + } + + } +} diff --git a/samples/XML/MainView.ux b/samples/XML/MainView.ux new file mode 100644 index 00000000..d71edf1f --- /dev/null +++ b/samples/XML/MainView.ux @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + + + + + + relativeNode + + + + + + + + RelativeTo="Size" + RelativeTo="ParentSize" + RelativeTo="Keyboard" + RelativeTo="PositionChange" + RelativeNode="relativeNode" + + + + + + + + + + + + + + + + + + + diff --git a/samples/XML/MyApp.ux b/samples/XML/MyApp.ux new file mode 100644 index 00000000..0e7b0088 --- /dev/null +++ b/samples/XML/MyApp.ux @@ -0,0 +1,11 @@ + + + + + + + This is an example of EdgeNavigator! + + + + diff --git a/samples/YANG/sfc-lisp-impl.yang b/samples/YANG/sfc-lisp-impl.yang new file mode 100644 index 00000000..761d9829 --- /dev/null +++ b/samples/YANG/sfc-lisp-impl.yang @@ -0,0 +1,55 @@ +module sfc-lisp-impl { + + yang-version 1; + namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sfc-lisp:impl"; + prefix "sfc-lisp-impl"; + + import config { prefix config; revision-date 2013-04-05; } + import rpc-context { prefix rpcx; revision-date 2013-06-17; } + import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; } + + + description + "This module contains the base YANG definitions for + sfc-lisp implementation."; + + revision "2015-04-27" { + description + "Initial revision."; + } + + // This is the definition of the service implementation as a module identity + identity sfc-lisp-impl { + base config:module-type; + + // Specifies the prefix for generated java classes. + config:java-name-prefix SfcLisp; + } + + + // Augments the 'configuration' choice node under modules/module. 
+ augment "/config:modules/config:module/config:configuration" { + case sfc-lisp-impl { + when "/config:modules/config:module/config:type = 'sfc-lisp-impl'"; + + //wires in the data-broker service + container data-broker { + uses config:service-ref { + refine type { + mandatory false; + config:required-identity mdsal:binding-async-data-broker; + } + } + } + + container rpc-registry { + uses config:service-ref { + refine type { + mandatory true; + config:required-identity mdsal:binding-rpc-registry; + } + } + } + } + } +} \ No newline at end of file diff --git a/HACKING.rst.txt b/samples/reStructuredText/HACKING.rst.txt similarity index 100% rename from HACKING.rst.txt rename to samples/reStructuredText/HACKING.rst.txt diff --git a/test/fixtures/Data/Modelines/ruby4 b/test/fixtures/Data/Modelines/ruby4 new file mode 100644 index 00000000..e3b50151 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby4 @@ -0,0 +1,3 @@ +# vim: filetype=ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby5 b/test/fixtures/Data/Modelines/ruby5 new file mode 100644 index 00000000..10349050 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby5 @@ -0,0 +1,3 @@ +# vim: ft=ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby6 b/test/fixtures/Data/Modelines/ruby6 new file mode 100644 index 00000000..a2b49dae --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby6 @@ -0,0 +1,3 @@ +# vim: syntax=Ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby7 b/test/fixtures/Data/Modelines/ruby7 new file mode 100644 index 00000000..1ed5b28f --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby7 @@ -0,0 +1,3 @@ +# vim: se syntax=ruby: + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby8 b/test/fixtures/Data/Modelines/ruby8 new file mode 100644 index 00000000..8e854741 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby8 @@ -0,0 +1,3 @@ +# vim: set syntax=ruby: + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby9 
b/test/fixtures/Data/Modelines/ruby9 new file mode 100644 index 00000000..ac82358d --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby9 @@ -0,0 +1,3 @@ +# ex: syntax=ruby + +# I am Ruby diff --git a/test/test_generated.rb b/test/test_generated.rb index 164e1fc2..591f50ed 100644 --- a/test/test_generated.rb +++ b/test/test_generated.rb @@ -69,5 +69,11 @@ class TestGenerated < Minitest::Test # Specflow generated_fixture_without_loading_data("Features/BindingCulture.feature.cs") + + # JFlex + generated_sample_loading_data("Java/JFlexLexer.java") + + # GrammarKit + generated_sample_loading_data("Java/GrammarKit.java") end end diff --git a/test/test_grammars.rb b/test/test_grammars.rb index a2ad1e53..29211cc2 100644 --- a/test/test_grammars.rb +++ b/test/test_grammars.rb @@ -113,6 +113,20 @@ class TestGrammars < Minitest::Test assert_equal [], licensed, msg end + def test_submodules_use_https_links + File.open(".gitmodules", "r") do |fh| + ssh_submodules = [] + fh.each_line do |line| + if matches = line.match(/url = (git@.*)/) + submodule_link = matches.captures[0] + ssh_submodules.push(submodule_link) + end + end + msg = "The following submodules don't have an HTTPS link:\n* #{ssh_submodules.join("\n* ")}" + assert_equal [], ssh_submodules, msg + end + end + private def submodule_paths diff --git a/test/test_heuristics.rb b/test/test_heuristics.rb index 524a522a..702320e9 100644 --- a/test/test_heuristics.rb +++ b/test/test_heuristics.rb @@ -155,6 +155,14 @@ class TestHeuristcs < Minitest::Test }) end + # Candidate languages = ["Pod", "Perl"] + def test_pod_by_heuristics + assert_heuristics({ + "Perl" => all_fixtures("Perl", "*.pod"), + "Pod" => all_fixtures("Pod", "*.pod") + }) + end + # Candidate languages = ["IDL", "Prolog", "QMake", "INI"] def test_pro_by_heuristics assert_heuristics({ diff --git a/test/test_modelines.rb b/test/test_modelines.rb index fd259782..85718955 100644 --- a/test/test_modelines.rb +++ b/test/test_modelines.rb @@ -11,6 +11,12 @@ class 
TestModelines < Minitest::Test assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby") assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby2") assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby3") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby4") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby5") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby6") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby7") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby8") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby9") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplus") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs1") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs2") diff --git a/vendor/grammars/AutoHotkey b/vendor/grammars/AutoHotkey index 77b8f2d7..4da62de2 160000 --- a/vendor/grammars/AutoHotkey +++ b/vendor/grammars/AutoHotkey @@ -1 +1 @@ -Subproject commit 77b8f2d7857e9251e3a6b9047f3eca5f76f0be43 +Subproject commit 4da62de23dc705bf9b95e76cf5e8e51aa1e68fea diff --git a/vendor/grammars/Elm.tmLanguage b/vendor/grammars/Elm.tmLanguage index 494145bd..437033bd 160000 --- a/vendor/grammars/Elm.tmLanguage +++ b/vendor/grammars/Elm.tmLanguage @@ -1 +1 @@ -Subproject commit 494145bded21fd6e18454d9d9d5aa8b67ced1b96 +Subproject commit 437033bd48350b49bc0dfa734206bfa0ba5de337 diff --git a/vendor/grammars/Handlebars b/vendor/grammars/Handlebars index 7e5396d9..2e9f6884 160000 --- a/vendor/grammars/Handlebars +++ b/vendor/grammars/Handlebars @@ -1 +1 @@ -Subproject commit 7e5396d9be8b05c705be614afb022f755a7edc17 +Subproject commit 2e9f68840073f5a3de13cbfed10e31b199760654 diff --git a/vendor/grammars/MagicPython b/vendor/grammars/MagicPython index d7855f98..82c76aff 160000 --- a/vendor/grammars/MagicPython +++ 
b/vendor/grammars/MagicPython @@ -1 +1 @@ -Subproject commit d7855f98ce6fbe0703c2753b0deae263a27fd8f8 +Subproject commit 82c76aff704192fb9ed1f505360635f575f13b5a diff --git a/vendor/grammars/NimLime b/vendor/grammars/NimLime index 4e60e565..4db349dd 160000 --- a/vendor/grammars/NimLime +++ b/vendor/grammars/NimLime @@ -1 +1 @@ -Subproject commit 4e60e5657fcddde6bf8b4bce0030ecb3154e7dfa +Subproject commit 4db349dda5219a37e99a0375e2a5d8a001fbf20e diff --git a/vendor/grammars/SublimePapyrus b/vendor/grammars/SublimePapyrus index 75d567dc..293a45f6 160000 --- a/vendor/grammars/SublimePapyrus +++ b/vendor/grammars/SublimePapyrus @@ -1 +1 @@ -Subproject commit 75d567dc3a890acbc6edebedad2879b0af426766 +Subproject commit 293a45f665de3fb4e1eaaf37bf152b3861a70d7a diff --git a/vendor/grammars/atom-fsharp b/vendor/grammars/atom-fsharp index 4376748e..13eb4d43 160000 --- a/vendor/grammars/atom-fsharp +++ b/vendor/grammars/atom-fsharp @@ -1 +1 @@ -Subproject commit 4376748ed29b5e104cd0dd807c8c9f09a09ec361 +Subproject commit 13eb4d4347be4e8dd825ba174e4271f9cb0f9cab diff --git a/vendor/grammars/atom-language-stan b/vendor/grammars/atom-language-stan index f6911bef..f8d855ea 160000 --- a/vendor/grammars/atom-language-stan +++ b/vendor/grammars/atom-language-stan @@ -1 +1 @@ -Subproject commit f6911bef091e155098f113ec5aaa89bd55283705 +Subproject commit f8d855eab960b4dd140c0f469a809401544850b8 diff --git a/vendor/grammars/c.tmbundle b/vendor/grammars/c.tmbundle index f1424d49..5b5d3663 160000 --- a/vendor/grammars/c.tmbundle +++ b/vendor/grammars/c.tmbundle @@ -1 +1 @@ -Subproject commit f1424d49dd879f6c5a67d3afe6c48a17f538345a +Subproject commit 5b5d366309e8a549ce6fff5bcd9aa57ffb6dae77 diff --git a/vendor/grammars/chapel-tmbundle b/vendor/grammars/chapel-tmbundle index b657738c..469476b2 160000 --- a/vendor/grammars/chapel-tmbundle +++ b/vendor/grammars/chapel-tmbundle @@ -1 +1 @@ -Subproject commit b657738cc05cddd8d8a20afa307e93e607d17d97 +Subproject commit 
469476b285adf6c4a09973fd12e97ec831afd050 diff --git a/vendor/grammars/d.tmbundle b/vendor/grammars/d.tmbundle index e6551b27..bc27d5e5 160000 --- a/vendor/grammars/d.tmbundle +++ b/vendor/grammars/d.tmbundle @@ -1 +1 @@ -Subproject commit e6551b27e30960a272758afb7da323aa402d70e6 +Subproject commit bc27d5e552b28eaceb76cfe93bd7c27a1105b57d diff --git a/vendor/grammars/elixir-tmbundle b/vendor/grammars/elixir-tmbundle index 13d94e0e..a7aa16e3 160000 --- a/vendor/grammars/elixir-tmbundle +++ b/vendor/grammars/elixir-tmbundle @@ -1 +1 @@ -Subproject commit 13d94e0ec98c2447adc80230708afa94012ec825 +Subproject commit a7aa16e349f673d70dd43fcab72e2a3f219cb9d7 diff --git a/vendor/grammars/factor b/vendor/grammars/factor index 7b289493..97d1ec75 160000 --- a/vendor/grammars/factor +++ b/vendor/grammars/factor @@ -1 +1 @@ -Subproject commit 7b289493316955db1649eb2745a2db7a78a4c9f9 +Subproject commit 97d1ec759eb9fa2ace83c62685b6b36faec05981 diff --git a/vendor/grammars/gap-tmbundle b/vendor/grammars/gap-tmbundle index ce4ff698..52c8fafb 160000 --- a/vendor/grammars/gap-tmbundle +++ b/vendor/grammars/gap-tmbundle @@ -1 +1 @@ -Subproject commit ce4ff698ebb4a692f0b9a3ab5e30853b87fd2b34 +Subproject commit 52c8fafb664fb7909223f92403e26fe3bfde0cdc diff --git a/vendor/grammars/html.tmbundle b/vendor/grammars/html.tmbundle index 36b90bc1..7710c79b 160000 --- a/vendor/grammars/html.tmbundle +++ b/vendor/grammars/html.tmbundle @@ -1 +1 @@ -Subproject commit 36b90bc113cdd59dcaeb0885103f9b6a773939e1 +Subproject commit 7710c79b161314fe937b05b13b1101e78f5dc62e diff --git a/vendor/grammars/language-babel b/vendor/grammars/language-babel index 20c649bc..e2fd09d7 160000 --- a/vendor/grammars/language-babel +++ b/vendor/grammars/language-babel @@ -1 +1 @@ -Subproject commit 20c649bcc79b9fd9a72e4b6e1fa11e14d553a9d8 +Subproject commit e2fd09d7d9caf1f336b3a35581e55387723dcbf3 diff --git a/vendor/grammars/language-coffee-script b/vendor/grammars/language-coffee-script index afa4f6f1..93e935ec 160000 
--- a/vendor/grammars/language-coffee-script +++ b/vendor/grammars/language-coffee-script @@ -1 +1 @@ -Subproject commit afa4f6f157e02fd1a635357e8117e4c726a84542 +Subproject commit 93e935ec1d54815c182d683d3dfb9583db4e269b diff --git a/vendor/grammars/language-crystal b/vendor/grammars/language-crystal index 5c0e60fd..f87d6864 160000 --- a/vendor/grammars/language-crystal +++ b/vendor/grammars/language-crystal @@ -1 +1 @@ -Subproject commit 5c0e60fd5323b5f093317ffda9721db3fcd72672 +Subproject commit f87d68645c9a4243a7942e6b414449f1eec8a3e7 diff --git a/vendor/grammars/language-gfm b/vendor/grammars/language-gfm index e5b24d57..298a8a3e 160000 --- a/vendor/grammars/language-gfm +++ b/vendor/grammars/language-gfm @@ -1 +1 @@ -Subproject commit e5b24d57e73463f747fae061d6e9d59a74b9f927 +Subproject commit 298a8a3eb180f1fa6b8a8bc77c2147e355c8cafd diff --git a/vendor/grammars/language-inform7 b/vendor/grammars/language-inform7 index b953a1ef..857864cd 160000 --- a/vendor/grammars/language-inform7 +++ b/vendor/grammars/language-inform7 @@ -1 +1 @@ -Subproject commit b953a1efedcff21091ba3b7e7fbcd1040c1f02bb +Subproject commit 857864cd812279672284f424da4f65e3c81b6c87 diff --git a/vendor/grammars/language-javascript b/vendor/grammars/language-javascript index 162309ab..f68e4bfe 160000 --- a/vendor/grammars/language-javascript +++ b/vendor/grammars/language-javascript @@ -1 +1 @@ -Subproject commit 162309ab8525c0f1b8602514e6aa347c9889437e +Subproject commit f68e4bfe54a3b9d16450223f401d2fb16453897f diff --git a/vendor/grammars/language-jsoniq b/vendor/grammars/language-jsoniq index 7a971acf..008a2458 160000 --- a/vendor/grammars/language-jsoniq +++ b/vendor/grammars/language-jsoniq @@ -1 +1 @@ -Subproject commit 7a971acf1c2001e6b2e9afc7b69b6ff8a2ae39ce +Subproject commit 008a24589d32a8f8bbd55985d27844a7cda842e5 diff --git a/vendor/grammars/language-less b/vendor/grammars/language-less new file mode 160000 index 00000000..a4ded260 --- /dev/null +++ b/vendor/grammars/language-less 
@@ -0,0 +1 @@ +Subproject commit a4ded2608cd22b8dba79df2025ad8dcf62158ddb diff --git a/vendor/grammars/language-python b/vendor/grammars/language-python index cdb699e7..75f0d2b0 160000 --- a/vendor/grammars/language-python +++ b/vendor/grammars/language-python @@ -1 +1 @@ -Subproject commit cdb699e7a86fd9f9f84ae561abddb696aad777aa +Subproject commit 75f0d2b06122a51db6e8e0b129b57585cd68f99c diff --git a/vendor/grammars/language-renpy b/vendor/grammars/language-renpy index 00e92d74..cc2f1c69 160000 --- a/vendor/grammars/language-renpy +++ b/vendor/grammars/language-renpy @@ -1 +1 @@ -Subproject commit 00e92d7450a97c33b40931113b64034bac27e010 +Subproject commit cc2f1c69f0b1c1d121aa5648422fc70d86dca7cf diff --git a/vendor/grammars/language-yaml b/vendor/grammars/language-yaml index e1d62e5a..249fdeed 160000 --- a/vendor/grammars/language-yaml +++ b/vendor/grammars/language-yaml @@ -1 +1 @@ -Subproject commit e1d62e5aff1c475ea3eedc3b03a52ce0e750ec89 +Subproject commit 249fdeed7877ccdcba123645f32cc6597bce4b37 diff --git a/vendor/grammars/language-yang b/vendor/grammars/language-yang new file mode 160000 index 00000000..c2d4a701 --- /dev/null +++ b/vendor/grammars/language-yang @@ -0,0 +1 @@ +Subproject commit c2d4a701d15e0ea7a1cef4f2fe4396b336504d7c diff --git a/vendor/grammars/latex.tmbundle b/vendor/grammars/latex.tmbundle index 3be8cd92..82986b93 160000 --- a/vendor/grammars/latex.tmbundle +++ b/vendor/grammars/latex.tmbundle @@ -1 +1 @@ -Subproject commit 3be8cd9208fb2aa5e9fe4ebd0074f55433bca7e0 +Subproject commit 82986b93a4f4ae7aab52445d8b7742b9af635d05 diff --git a/vendor/grammars/less.tmbundle b/vendor/grammars/less.tmbundle deleted file mode 160000 index 7ef97ad5..00000000 --- a/vendor/grammars/less.tmbundle +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7ef97ad5f15d2a136afe4d5cf568fc8ee79675b7 diff --git a/vendor/grammars/objective-c.tmbundle b/vendor/grammars/objective-c.tmbundle index 1bade8a1..fdcedb95 160000 --- a/vendor/grammars/objective-c.tmbundle +++ 
b/vendor/grammars/objective-c.tmbundle @@ -1 +1 @@ -Subproject commit 1bade8a1c919c358fc4a6d83ba93e98e419ffede +Subproject commit fdcedb95de8846220c49f769fee91045188767d9 diff --git a/vendor/grammars/perl6fe b/vendor/grammars/perl6fe new file mode 160000 index 00000000..12fa46f7 --- /dev/null +++ b/vendor/grammars/perl6fe @@ -0,0 +1 @@ +Subproject commit 12fa46f7bbf646616df1120ed8cfee3e1fcb75de diff --git a/vendor/grammars/php.tmbundle b/vendor/grammars/php.tmbundle index 2ecaa60d..3ed4837b 160000 --- a/vendor/grammars/php.tmbundle +++ b/vendor/grammars/php.tmbundle @@ -1 +1 @@ -Subproject commit 2ecaa60d92b92d4c07f243207ba1d5b2114bb70a +Subproject commit 3ed4837b43d3f650ebb525b068636281942883a0 diff --git a/vendor/grammars/powershell b/vendor/grammars/powershell index 982ae21d..b4ea52c5 160000 --- a/vendor/grammars/powershell +++ b/vendor/grammars/powershell @@ -1 +1 @@ -Subproject commit 982ae21d54b3affc498131515ebbfca6b186ac16 +Subproject commit b4ea52c51c5111fdda1d24103082d9580472c31b diff --git a/vendor/grammars/sql.tmbundle b/vendor/grammars/sql.tmbundle index 1fd3f03f..6d4edbc1 160000 --- a/vendor/grammars/sql.tmbundle +++ b/vendor/grammars/sql.tmbundle @@ -1 +1 @@ -Subproject commit 1fd3f03f97e37fde2c7f2dd4c11728a19242d320 +Subproject commit 6d4edbc113d3272f7c097d6b1504624289ee2bc5 diff --git a/vendor/grammars/sublime-nginx b/vendor/grammars/sublime-nginx index ba28d29d..fcf644ec 160000 --- a/vendor/grammars/sublime-nginx +++ b/vendor/grammars/sublime-nginx @@ -1 +1 @@ -Subproject commit ba28d29de729f01d39a29fa214a6818112b48803 +Subproject commit fcf644ecea021ab8a6bc171f415f8df0b005b31e diff --git a/vendor/grammars/sublime-pony b/vendor/grammars/sublime-pony index 384ba3ed..b5eb8f8e 160000 --- a/vendor/grammars/sublime-pony +++ b/vendor/grammars/sublime-pony @@ -1 +1 @@ -Subproject commit 384ba3ed980189f0cc140d3fb86455f880fffeb0 +Subproject commit b5eb8f8e97e5253de8d81cfa4826cfb5815f2944 diff --git a/vendor/grammars/sublime-rust 
b/vendor/grammars/sublime-rust index 06a278cc..621e4f61 160000 --- a/vendor/grammars/sublime-rust +++ b/vendor/grammars/sublime-rust @@ -1 +1 @@ -Subproject commit 06a278ccfaf5e542d26a95d66c734b4407bc4912 +Subproject commit 621e4f6117531d8fe299eb5584a6be766df1822e diff --git a/vendor/grammars/sublime-typescript b/vendor/grammars/sublime-typescript index d2c76b8c..26fd717a 160000 --- a/vendor/grammars/sublime-typescript +++ b/vendor/grammars/sublime-typescript @@ -1 +1 @@ -Subproject commit d2c76b8c8262c9a848d4814662a0c8e13d1e2a6a +Subproject commit 26fd717a79d1984e76bbe6d958c5c4bbf0179049 diff --git a/vendor/grammars/sublime_cobol b/vendor/grammars/sublime_cobol index ad15734a..3d2b6dbc 160000 --- a/vendor/grammars/sublime_cobol +++ b/vendor/grammars/sublime_cobol @@ -1 +1 @@ -Subproject commit ad15734a4a9798a006525f53a968565fee1411b1 +Subproject commit 3d2b6dbcd1b27023150ff9d8ab47953706d070b8 diff --git a/vendor/grammars/swift.tmbundle b/vendor/grammars/swift.tmbundle index e6375f4d..32141e91 160000 --- a/vendor/grammars/swift.tmbundle +++ b/vendor/grammars/swift.tmbundle @@ -1 +1 @@ -Subproject commit e6375f4dbb9c5a715e1924b1e9fc1056f0b3dee7 +Subproject commit 32141e91eeffbbd29482d5b1a0131eec4613e73e diff --git a/vendor/grammars/vue-syntax-highlight b/vendor/grammars/vue-syntax-highlight index a2336ddc..f20c9bab 160000 --- a/vendor/grammars/vue-syntax-highlight +++ b/vendor/grammars/vue-syntax-highlight @@ -1 +1 @@ -Subproject commit a2336ddc7ea01876caa31793f806250cad3b53e6 +Subproject commit f20c9bab7e71738f421e6edc1aab8839ee05d85a