diff --git a/.gitmodules b/.gitmodules index 8234378a..41e39127 100644 --- a/.gitmodules +++ b/.gitmodules @@ -25,9 +25,6 @@ [submodule "vendor/grammars/Sublime-REBOL"] path = vendor/grammars/Sublime-REBOL url = https://github.com/Oldes/Sublime-REBOL -[submodule "vendor/grammars/Sublime-Inform"] - path = vendor/grammars/Sublime-Inform - url = https://github.com/PogiNate/Sublime-Inform [submodule "vendor/grammars/autoitv3-tmbundle"] path = vendor/grammars/autoitv3-tmbundle url = https://github.com/Red-Nova-Technologies/autoitv3-tmbundle @@ -343,9 +340,6 @@ [submodule "vendor/grammars/latex.tmbundle"] path = vendor/grammars/latex.tmbundle url = https://github.com/textmate/latex.tmbundle -[submodule "vendor/grammars/less.tmbundle"] - path = vendor/grammars/less.tmbundle - url = https://github.com/textmate/less.tmbundle [submodule "vendor/grammars/lilypond.tmbundle"] path = vendor/grammars/lilypond.tmbundle url = https://github.com/textmate/lilypond.tmbundle @@ -658,7 +652,7 @@ url = https://github.com/rpavlick/language-ncl.git [submodule "vendor/grammars/atom-language-purescript"] path = vendor/grammars/atom-language-purescript - url = https://github.com/freebroccolo/atom-language-purescript + url = https://github.com/purescript-contrib/atom-language-purescript [submodule "vendor/grammars/vue-syntax-highlight"] path = vendor/grammars/vue-syntax-highlight url = https://github.com/vuejs/vue-syntax-highlight @@ -679,7 +673,7 @@ url = https://github.com/CausalityLtd/sublime-pony [submodule "vendor/grammars/X10"] path = vendor/grammars/X10 - url = git@github.com:x10-lang/x10-highlighting.git + url = https://github.com/x10-lang/x10-highlighting [submodule "vendor/grammars/language-babel"] path = vendor/grammars/language-babel url = https://github.com/gandm/language-babel @@ -692,6 +686,9 @@ [submodule "vendor/grammars/FreeMarker.tmbundle"] path = vendor/grammars/FreeMarker.tmbundle url = https://github.com/freemarker/FreeMarker.tmbundle +[submodule "vendor/grammars/MagicPython"] + path = vendor/grammars/MagicPython + url = https://github.com/MagicStack/MagicPython [submodule "vendor/grammars/language-click"] path = vendor/grammars/language-click url = https://github.com/stenverbois/language-click.git @@ -701,3 +698,18 @@ [submodule "vendor/grammars/language-renpy"] path = vendor/grammars/language-renpy url = https://github.com/williamd1k0/language-renpy.git +[submodule "vendor/grammars/language-inform7"] + path = vendor/grammars/language-inform7 + url = https://github.com/erkyrath/language-inform7 +[submodule "vendor/grammars/atom-language-stan"] + path = vendor/grammars/atom-language-stan + url = https://github.com/jrnold/atom-language-stan +[submodule "vendor/grammars/language-yang"] + path = vendor/grammars/language-yang + url = https://github.com/DzonyKalafut/language-yang.git +[submodule "vendor/grammars/perl6fe"] + path = vendor/grammars/perl6fe + url = https://github.com/MadcapJake/language-perl6fe.git +[submodule "vendor/grammars/language-less"] + path = vendor/grammars/language-less + url = https://github.com/atom/language-less.git diff --git a/LICENSE b/LICENSE index c0a52444..ca0844d1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2011-2015 GitHub, Inc. +Copyright (c) 2011-2016 GitHub, Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/README.md b/README.md index 36bcbd50..2d661ef9 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,9 @@ Alternatively, you can use Vim or Emacs style modelines to set the language for ##### Vim ``` +# Some examples of various styles: +vim: syntax=java +vim: set syntax=ruby: vim: set filetype=prolog: vim: set ft=cpp: ``` diff --git a/grammars.yml b/grammars.yml index 8f7b1173..6139fb0a 100644 --- a/grammars.yml +++ b/grammars.yml @@ -69,6 +69,9 @@ vendor/grammars/Lean.tmbundle: - source.lean vendor/grammars/LiveScript.tmbundle: - source.livescript +vendor/grammars/MagicPython: +- source.python +- source.regexp.python vendor/grammars/Modelica/: - source.modelica vendor/grammars/NSIS: @@ -103,8 +106,6 @@ vendor/grammars/Sublime-Coq: - source.coq vendor/grammars/Sublime-HTTP: - source.httpspec -vendor/grammars/Sublime-Inform: -- source.Inform7 vendor/grammars/Sublime-Lasso: - file.lasso vendor/grammars/Sublime-Logos: @@ -187,6 +188,8 @@ vendor/grammars/atom-fsharp/: - source.fsharp.fsx vendor/grammars/atom-language-purescript/: - source.purescript +vendor/grammars/atom-language-stan/: +- source.stan vendor/grammars/atom-salt: - source.python.salt - source.yaml.salt @@ -342,6 +345,8 @@ vendor/grammars/language-gfm: - source.gfm vendor/grammars/language-hy: - source.hy +vendor/grammars/language-inform7: +- source.inform7 vendor/grammars/language-javascript: - source.js - source.js.regexp @@ -349,13 +354,13 @@ vendor/grammars/language-javascript: vendor/grammars/language-jsoniq/: - source.jq - source.xq +vendor/grammars/language-less/: +- source.css.less vendor/grammars/language-maxscript: - source.maxscript vendor/grammars/language-ncl: - source.ncl vendor/grammars/language-python: -- source.python -- source.regexp.python - text.python.console - text.python.traceback vendor/grammars/language-renpy: @@ -369,6 +374,8 @@ vendor/grammars/language-xbase: - source.harbour vendor/grammars/language-yaml: - source.yaml +vendor/grammars/language-yang/: +- source.yang vendor/grammars/latex.tmbundle: - text.bibtex - text.log.latex @@ -376,8 +383,6 @@ vendor/grammars/latex.tmbundle: - text.tex.latex - text.tex.latex.beamer - text.tex.latex.memoir -vendor/grammars/less.tmbundle: -- source.css.less vendor/grammars/lilypond.tmbundle: - source.lilypond vendor/grammars/liquid.tmbundle: @@ -440,6 +445,10 @@ vendor/grammars/pascal.tmbundle: vendor/grammars/perl.tmbundle/: - source.perl - source.perl.6 +vendor/grammars/perl6fe: +- source.meta-info +- source.perl6fe +- source.regexp.perl6fe vendor/grammars/php-smarty.tmbundle: - text.html.smarty vendor/grammars/php.tmbundle: diff --git a/lib/linguist.rb b/lib/linguist.rb index 3929efb9..589869c6 100644 --- a/lib/linguist.rb +++ b/lib/linguist.rb @@ -13,8 +13,8 @@ class << Linguist def instrument(*args, &bk) if instrumenter instrumenter.instrument(*args, &bk) - else - yield if block_given? + elsif block_given? + yield end end end diff --git a/lib/linguist/generated.rb b/lib/linguist/generated.rb index 35766e4d..7747406f 100644 --- a/lib/linguist/generated.rb +++ b/lib/linguist/generated.rb @@ -72,7 +72,9 @@ module Linguist vcr_cassette? || generated_module? || generated_unity3d_meta? || - generated_racc? + generated_racc? || + generated_jflex? || + generated_grammarkit? end # Internal: Is the blob an Xcode file? 
@@ -373,5 +375,32 @@ module Linguist return false unless lines.count > 2 return lines[2].start_with?("# This file is automatically generated by Racc") end + + # Internal: Is this a JFlex-generated file? + # + # A JFlex-generated file contains: + # /* The following code was generated by JFlex x.y.z on d/at/e ti:me */ + # on the first line. + # + # Return true or false + def generated_jflex? + return false unless extname == '.java' + return false unless lines.count > 1 + return lines[0].start_with?("/* The following code was generated by JFlex ") + end + + # Internal: Is this a GrammarKit-generated file? + # + # A GrammarKit-generated file typically contains: + # // This is a generated file. Not intended for manual editing. + # on the first line. This is not always the case, as it's possible to + # customize the class header. + # + # Return true or false + def generated_grammarkit? + return false unless extname == '.java' + return false unless lines.count > 1 + return lines[0].start_with?("// This is a generated file. Not intended for manual editing.") + end end end diff --git a/lib/linguist/heuristics.rb index 7c1c7db1..0806ce1f 100644 --- a/lib/linguist/heuristics.rb +++ b/lib/linguist/heuristics.rb @@ -276,19 +276,27 @@ module Linguist end disambiguate ".pl" do |data| - if /^(use v6|(my )?class|module)/.match(data) - Language["Perl6"] + if /^[^#]+:-/.match(data) + Language["Prolog"] elsif /use strict|use\s+v?5\./.match(data) Language["Perl"] - elsif /^[^#]+:-/.match(data) - Language["Prolog"] + elsif /^(use v6|(my )?class|module)/.match(data) + Language["Perl6"] end end disambiguate ".pm", ".t" do |data| - if /^(use v6|(my )?class|module)/.match(data) + if /use strict|use\s+v?5\./.match(data) + Language["Perl"] + elsif /^(use v6|(my )?class|module)/.match(data) Language["Perl6"] - elsif /use strict|use\s+v?5\./.match(data) + end + end + + disambiguate ".pod" do |data| + if /^=\w+$/.match(data) + Language["Pod"] + else Language["Perl"] end end diff --git a/lib/linguist/languages.yml index 7a33cf1f..b497d88b 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -183,6 +183,7 @@ AppleScript: interpreters: - osascript ace_mode: applescript + color: "#101F1F" Arc: type: programming @@ -290,6 +291,7 @@ Batchfile: - .cmd tm_scope: source.dosbatch ace_mode: batchfile + color: "#C1F12E" Befunge: type: programming @@ -304,6 +306,7 @@ Bison: extensions: - .bison ace_mode: text + color: "#6A463F" BitBake: type: programming @@ -638,7 +641,7 @@ Common Lisp: Component Pascal: type: programming - color: "#b0ce4e" + color: "#B0CE4E" extensions: - .cp - .cps @@ -701,6 +704,7 @@ Cucumber: aliases: - gherkin ace_mode: text + color: "#5B2063" Cuda: type: programming @@ -709,6 +713,7 @@ - .cuh tm_scope: source.cuda-c++ ace_mode: c_cpp + color: "#3A4E3A" Cycript: type: programming @@ -801,7 +806,6 @@ Dart: Diff: type: data - color: "#88dddd" extensions: - .diff - .patch @@ -939,6 +943,8 @@ Erlang: - .es - .escript - .hrl + - .xrl + - .yrl filenames: - rebar.config - rebar.config.lock @@ -1390,6 +1396,7 @@ Hack: - .hh - .php tm_scope: text.html.php + color: "#878787" Haml: group: HTML @@ -1398,6 +1405,7 @@ - .haml - .haml.deface ace_mode: haml + color: "#ECE2A9" Handlebars: type: markup @@ -1510,7 +1518,7 @@ Inform 7: extensions: - .ni - .i7x - tm_scope: source.Inform7 + tm_scope: source.inform7 aliases: - i7 - inform7 @@ -1764,6 +1772,7 @@ LLVM: extensions: - .ll ace_mode: text + color: "#185619" LOLCODE: type:
programming @@ -1827,6 +1836,7 @@ Less: - .less tm_scope: source.css.less ace_mode: less + color: "#A1D9A1" Lex: type: programming @@ -2019,6 +2029,8 @@ Makefile: - GNUmakefile - Kbuild - Makefile + - Makefile.am + - Makefile.in - Makefile.inc - makefile interpreters: @@ -2045,6 +2057,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm + color: "#083FA1" Mask: type: markup @@ -2061,6 +2074,7 @@ Mathematica: - .cdf - .m - .ma + - .mt - .nb - .nbp - .wl @@ -2107,6 +2121,7 @@ MediaWiki: wrap: true extensions: - .mediawiki + - .wiki tm_scope: text.html.mediawiki ace_mode: text @@ -2282,6 +2297,7 @@ Nginx: aliases: - nginx configuration file ace_mode: text + color: "#9469E9" Nimrod: type: programming @@ -2340,6 +2356,7 @@ NumPy: - .numsc tm_scope: none ace_mode: text + color: "#9C8AF9" OCaml: type: programming @@ -2590,7 +2607,7 @@ Parrot Internal Representation: Pascal: type: programming - color: "#b0ce4e" + color: "#E3F171" extensions: - .pas - .dfm @@ -2639,7 +2656,7 @@ Perl6: - Rexfile interpreters: - perl6 - tm_scope: source.perl.6 + tm_scope: source.perl6fe ace_mode: perl Pickle: @@ -2805,6 +2822,7 @@ Python: color: "#3572A5" extensions: - .py + - .bzl - .cgi - .fcgi - .gyp @@ -2858,7 +2876,7 @@ QMake: R: type: programming - color: "#198ce7" + color: "#198CE7" aliases: - R - Rscript @@ -2888,6 +2906,7 @@ RDoc: extensions: - .rdoc tm_scope: text.rdoc + color: "#8E84BF" REALbasic: type: programming @@ -3079,6 +3098,7 @@ Rust: color: "#dea584" extensions: - .rs + - .rs.in ace_mode: rust SAS: @@ -3096,6 +3116,7 @@ SCSS: ace_mode: scss extensions: - .scss + color: "#CF649A" SMT: type: programming @@ -3198,6 +3219,7 @@ Sass: extensions: - .sass ace_mode: sass + color: "#CF649A" Scala: type: programming @@ -3255,6 +3277,7 @@ Shell: color: "#89e051" aliases: - sh + - shell-script - bash - zsh extensions: @@ -3265,6 +3288,7 @@ Shell: - .command - .fcgi - .ksh + - .sh.in - .tmux - .tool - .zsh @@ -3354,6 +3378,14 @@ Squirrel: tm_scope: source.c++ ace_mode: c_cpp +Stan: + type: programming + color: "#b2011d" + extensions: + - .stan + ace_mode: text + tm_scope: source.stan + Standard ML: type: programming color: "#dc566d" @@ -3555,7 +3587,6 @@ Unified Parallel C: Unity3D Asset: type: data ace_mode: yaml - color: "#ab69a1" extensions: - .anim - .asset @@ -3787,6 +3818,7 @@ XML: - .xliff - .xmi - .xml.dist + - .xproj - .xsd - .xul - .zcml @@ -3843,6 +3875,7 @@ XSLT: - .xsl tm_scope: text.xml.xsl ace_mode: xml + color: "#EB8CEB" Xojo: type: programming @@ -3876,6 +3909,13 @@ YAML: - .yaml-tmlanguage ace_mode: yaml +YANG: + type: data + extensions: + - .yang + tm_scope: source.yang + ace_mode: text + Yacc: type: programming extensions: @@ -3884,6 +3924,7 @@ Yacc: - .yy tm_scope: source.bison ace_mode: text + color: "#4B6C4B" Zephir: type: programming @@ -3923,7 +3964,6 @@ eC: edn: type: data ace_mode: clojure - color: "#db5855" extensions: - .edn tm_scope: source.clojure @@ -3965,7 +4005,10 @@ reStructuredText: extensions: - .rst - .rest + - .rest.txt + - .rst.txt ace_mode: text + color: "#B3BCBC" wisp: type: programming diff --git a/lib/linguist/lazy_blob.rb b/lib/linguist/lazy_blob.rb index 28fb78f3..e828dca6 100644 --- a/lib/linguist/lazy_blob.rb +++ b/lib/linguist/lazy_blob.rb @@ -86,8 +86,8 @@ module Linguist protected # Returns true if the attribute is present and not the string "false". - def boolean_attribute(attr) - attr != "false" + def boolean_attribute(attribute) + attribute != "false" end def load_blob! 
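The reordered ".pl" disambiguation in lib/linguist/heuristics.rb above is order-sensitive: the if/elsif chain returns on the first matching pattern, so moving the Prolog check ahead of Perl changes the verdict for any file that matches more than one rule. A minimal standalone sketch of that first-match-wins behavior, reusing the regexes from the diff (`PL_RULES` and `guess_pl` are illustrative names, not part of Linguist's API):

```ruby
# First match wins, mirroring the if/elsif chain in heuristics.rb.
# PL_RULES and guess_pl are illustrative names, not Linguist API.
PL_RULES = [
  [/^[^#]+:-/,                     "Prolog"],
  [/use strict|use\s+v?5\./,       "Perl"],
  [/^(use v6|(my )?class|module)/, "Perl6"],
]

def guess_pl(data)
  PL_RULES.each { |pattern, language| return language if pattern.match(data) }
  nil
end

guess_pl("likes(alice, bob) :- true.") #=> "Prolog"
guess_pl("use strict; use warnings;")  #=> "Perl"
guess_pl("use v6;")                    #=> "Perl6"
```

With the Prolog pattern first, a file containing a `:-` clause now classifies as Prolog even if it also mentions `use strict` further down, which is the point of the reorder.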
diff --git a/lib/linguist/strategy/modeline.rb b/lib/linguist/strategy/modeline.rb index f995d940..eb5a8a5f 100644 --- a/lib/linguist/strategy/modeline.rb +++ b/lib/linguist/strategy/modeline.rb @@ -1,8 +1,19 @@ module Linguist module Strategy class Modeline - EmacsModeline = /-\*-\s*(?:(?!mode)[\w-]+\s*:\s*(?:[\w+-]+)\s*;?\s*)*(?:mode\s*:)?\s*([\w+-]+)\s*(?:;\s*(?!mode)[\w-]+\s*:\s*[\w+-]+\s*)*;?\s*-\*-/i - VimModeline = /vim:\s*set.*\s(?:ft|filetype)=(\w+)\s?.*:/i + EMACS_MODELINE = /-\*-\s*(?:(?!mode)[\w-]+\s*:\s*(?:[\w+-]+)\s*;?\s*)*(?:mode\s*:)?\s*([\w+-]+)\s*(?:;\s*(?!mode)[\w-]+\s*:\s*[\w+-]+\s*)*;?\s*-\*-/i + + # First form vim modeline + # [text]{white}{vi:|vim:|ex:}[white]{options} + # ex: 'vim: syntax=ruby' + VIM_MODELINE_1 = /(?:vim|vi|ex):\s*(?:ft|filetype|syntax)=(\w+)\s?/i + + # Second form vim modeline (compatible with some versions of Vi) + # [text]{white}{vi:|vim:|Vim:|ex:}[white]se[t] {options}:[text] + # ex: 'vim: set syntax=ruby:' + VIM_MODELINE_2 = /(?:vim|vi|Vim|ex):\s*se(?:t)?.*\s(?:ft|filetype|syntax)=(\w+)\s?.*:/i + + MODELINES = [EMACS_MODELINE, VIM_MODELINE_1, VIM_MODELINE_2] # Public: Detects language based on Vim and Emacs modelines # @@ -22,7 +33,7 @@ # # Returns a String or nil def self.modeline(data) - match = data.match(EmacsModeline) || data.match(VimModeline) + match = MODELINES.map { |regex| data.match(regex) }.reject(&:nil?).first match[1] if match end end diff --git a/lib/linguist/version.rb b/lib/linguist/version.rb index b389c1c9..bc1707a9 100644 --- a/lib/linguist/version.rb +++ b/lib/linguist/version.rb @@ -1,3 +1,3 @@ module Linguist - VERSION = "4.7.3" + VERSION = "4.7.5" end diff --git a/samples/Erlang/elixir_parser.yrl b/samples/Erlang/elixir_parser.yrl new file mode 100644 index 00000000..f49f8539 --- /dev/null +++ b/samples/Erlang/elixir_parser.yrl @@ -0,0 +1,856 @@ +Nonterminals + grammar expr_list + expr container_expr block_expr access_expr + no_parens_expr no_parens_zero_expr no_parens_one_expr no_parens_one_ambig_expr + bracket_expr bracket_at_expr bracket_arg matched_expr unmatched_expr max_expr + unmatched_op_expr matched_op_expr no_parens_op_expr no_parens_many_expr + comp_op_eol at_op_eol unary_op_eol and_op_eol or_op_eol capture_op_eol + add_op_eol mult_op_eol two_op_eol three_op_eol pipe_op_eol stab_op_eol + arrow_op_eol match_op_eol when_op_eol in_op_eol in_match_op_eol + type_op_eol rel_op_eol + open_paren close_paren empty_paren eoe + list list_args open_bracket close_bracket + tuple open_curly close_curly + bit_string open_bit close_bit + map map_op map_close map_args map_expr struct_op + assoc_op_eol assoc_expr assoc_base assoc_update assoc_update_kw assoc + container_args_base container_args + call_args_parens_expr call_args_parens_base call_args_parens parens_call + call_args_no_parens_one call_args_no_parens_ambig call_args_no_parens_expr + call_args_no_parens_comma_expr call_args_no_parens_all call_args_no_parens_many + call_args_no_parens_many_strict + stab stab_eoe stab_expr stab_op_eol_and_expr stab_parens_many + kw_eol kw_base kw call_args_no_parens_kw_expr call_args_no_parens_kw + dot_op dot_alias dot_alias_container + dot_identifier dot_op_identifier dot_do_identifier + dot_paren_identifier dot_bracket_identifier + do_block fn_eoe do_eoe end_eoe block_eoe block_item block_list + .
+ +Terminals + identifier kw_identifier kw_identifier_safe kw_identifier_unsafe bracket_identifier + paren_identifier do_identifier block_identifier + fn 'end' aliases + number atom atom_safe atom_unsafe bin_string list_string sigil + dot_call_op op_identifier + comp_op at_op unary_op and_op or_op arrow_op match_op in_op in_match_op + type_op dual_op add_op mult_op two_op three_op pipe_op stab_op when_op assoc_op + capture_op rel_op + 'true' 'false' 'nil' 'do' eol ';' ',' '.' + '(' ')' '[' ']' '{' '}' '<<' '>>' '%{}' '%' + . + +Rootsymbol grammar. + +%% Two shift/reduce conflicts coming from call_args_parens. +Expect 2. + +%% Changes in ops and precedence should be reflected on lib/elixir/lib/macro.ex +%% Note though the operator => in practice has lower precedence than all others, +%% its entry in the table is only to support the %{user | foo => bar} syntax. +Left 5 do. +Right 10 stab_op_eol. %% -> +Left 20 ','. +Nonassoc 30 capture_op_eol. %% & +Left 40 in_match_op_eol. %% <-, \\ (allowed in matches along =) +Right 50 when_op_eol. %% when +Right 60 type_op_eol. %% :: +Right 70 pipe_op_eol. %% | +Right 80 assoc_op_eol. %% => +Right 90 match_op_eol. %% = +Left 130 or_op_eol. %% ||, |||, or +Left 140 and_op_eol. %% &&, &&&, and +Left 150 comp_op_eol. %% ==, !=, =~, ===, !== +Left 160 rel_op_eol. %% <, >, <=, >= +Left 170 arrow_op_eol. %% |>, <<<, >>>, ~>>, <<~, ~>, <~, <~>, <|> +Left 180 in_op_eol. %% in +Left 190 three_op_eol. %% ^^^ +Right 200 two_op_eol. %% ++, --, .., <> +Left 210 add_op_eol. %% +, - +Left 220 mult_op_eol. %% *, / +Nonassoc 300 unary_op_eol. %% +, -, !, ^, not, ~~~ +Left 310 dot_call_op. +Left 310 dot_op. %% . +Nonassoc 320 at_op_eol. %% @ +Nonassoc 330 dot_identifier. + +%%% MAIN FLOW OF EXPRESSIONS + +grammar -> eoe : nil. +grammar -> expr_list : to_block('$1'). +grammar -> eoe expr_list : to_block('$2'). +grammar -> expr_list eoe : to_block('$1'). +grammar -> eoe expr_list eoe : to_block('$2'). +grammar -> '$empty' : nil. + +% Note expressions are on reverse order +expr_list -> expr : ['$1']. +expr_list -> expr_list eoe expr : ['$3'|'$1']. + +expr -> matched_expr : '$1'. +expr -> no_parens_expr : '$1'. +expr -> unmatched_expr : '$1'. + +%% In Elixir we have three main call syntaxes: with parentheses, +%% without parentheses and with do blocks. They are represented +%% in the AST as matched, no_parens and unmatched. +%% +%% Calls without parentheses are further divided according to how +%% problematic they are: +%% +%% (a) no_parens_one: a call with one unproblematic argument +%% (e.g. `f a` or `f g a` and similar) (includes unary operators) +%% +%% (b) no_parens_many: a call with several arguments (e.g. `f a, b`) +%% +%% (c) no_parens_one_ambig: a call with one argument which is +%% itself a no_parens_many or no_parens_one_ambig (e.g. `f g a, b` +%% or `f g h a, b` and similar) +%% +%% Note, in particular, that no_parens_one_ambig expressions are +%% ambiguous and are interpreted such that the outer function has +%% arity 1 (e.g. `f g a, b` is interpreted as `f(g(a, b))` rather +%% than `f(g(a), b)`). Hence the name, no_parens_one_ambig. 
+%% +%% The distinction is required because we can't, for example, have +%% a function call with a do block as argument inside another do +%% block call, unless there are parentheses: +%% +%% if if true do true else false end do #=> invalid +%% if(if true do true else false end) do #=> valid +%% +%% Similarly, it is not possible to nest calls without parentheses +%% if their arity is more than 1: +%% +%% foo a, bar b, c #=> invalid +%% foo(a, bar b, c) #=> invalid +%% foo bar a, b #=> valid +%% foo a, bar(b, c) #=> valid +%% +%% So the different grammar rules need to take into account +%% if calls without parentheses are do blocks in particular +%% segments and act accordingly. +matched_expr -> matched_expr matched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +matched_expr -> unary_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> at_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> capture_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> no_parens_one_expr : '$1'. +matched_expr -> no_parens_zero_expr : '$1'. +matched_expr -> access_expr : '$1'. +matched_expr -> access_expr kw_identifier : throw_invalid_kw_identifier('$2'). + +unmatched_expr -> matched_expr unmatched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr matched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr unmatched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr no_parens_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unary_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> at_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> capture_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> block_expr : '$1'. + +no_parens_expr -> matched_expr no_parens_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +no_parens_expr -> unary_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> at_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> capture_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> no_parens_one_ambig_expr : '$1'. +no_parens_expr -> no_parens_many_expr : '$1'. + +block_expr -> parens_call call_args_parens do_block : build_identifier('$1', '$2' ++ '$3'). +block_expr -> parens_call call_args_parens call_args_parens do_block : build_nested_parens('$1', '$2', '$3' ++ '$4'). +block_expr -> dot_do_identifier do_block : build_identifier('$1', '$2'). +block_expr -> dot_identifier call_args_no_parens_all do_block : build_identifier('$1', '$2' ++ '$3'). + +matched_op_expr -> match_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> add_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> mult_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> two_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> three_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> and_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> or_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> in_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> in_match_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> type_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> when_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> pipe_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> comp_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> rel_op_eol matched_expr : {'$1', '$2'}. 
+matched_op_expr -> arrow_op_eol matched_expr : {'$1', '$2'}. +%% Warn for no parens subset +matched_op_expr -> arrow_op_eol no_parens_one_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. + +unmatched_op_expr -> match_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> add_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> mult_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> two_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> three_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> and_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> or_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> in_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> in_match_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> type_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> when_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> pipe_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> comp_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> rel_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> arrow_op_eol unmatched_expr : {'$1', '$2'}. + +no_parens_op_expr -> match_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> add_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> mult_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> two_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> three_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> and_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> or_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> in_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> in_match_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> type_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> when_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> pipe_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> comp_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> rel_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> arrow_op_eol no_parens_expr : {'$1', '$2'}. +%% Warn for no parens subset +no_parens_op_expr -> arrow_op_eol no_parens_one_ambig_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. +no_parens_op_expr -> arrow_op_eol no_parens_many_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. + +%% Allow when (and only when) with keywords +no_parens_op_expr -> when_op_eol call_args_no_parens_kw : {'$1', '$2'}. + +no_parens_one_ambig_expr -> dot_op_identifier call_args_no_parens_ambig : build_identifier('$1', '$2'). +no_parens_one_ambig_expr -> dot_identifier call_args_no_parens_ambig : build_identifier('$1', '$2'). + +no_parens_many_expr -> dot_op_identifier call_args_no_parens_many_strict : build_identifier('$1', '$2'). +no_parens_many_expr -> dot_identifier call_args_no_parens_many_strict : build_identifier('$1', '$2'). + +no_parens_one_expr -> dot_op_identifier call_args_no_parens_one : build_identifier('$1', '$2'). +no_parens_one_expr -> dot_identifier call_args_no_parens_one : build_identifier('$1', '$2'). +no_parens_zero_expr -> dot_do_identifier : build_identifier('$1', nil). +no_parens_zero_expr -> dot_identifier : build_identifier('$1', nil). + +%% From this point on, we just have constructs that can be +%% used with the access syntax. Notice that (dot_)identifier +%% is not included in this list simply because the tokenizer +%% marks identifiers followed by brackets as bracket_identifier. +access_expr -> bracket_at_expr : '$1'. 
+access_expr -> bracket_expr : '$1'. +access_expr -> at_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> unary_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> capture_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> fn_eoe stab end_eoe : build_fn('$1', reverse('$2')). +access_expr -> open_paren stab close_paren : build_stab(reverse('$2')). +access_expr -> open_paren stab ';' close_paren : build_stab(reverse('$2')). +access_expr -> open_paren ';' stab ';' close_paren : build_stab(reverse('$3')). +access_expr -> open_paren ';' stab close_paren : build_stab(reverse('$3')). +access_expr -> open_paren ';' close_paren : build_stab([]). +access_expr -> empty_paren : nil. +access_expr -> number : ?exprs('$1'). +access_expr -> list : element(1, '$1'). +access_expr -> map : '$1'. +access_expr -> tuple : '$1'. +access_expr -> 'true' : ?id('$1'). +access_expr -> 'false' : ?id('$1'). +access_expr -> 'nil' : ?id('$1'). +access_expr -> bin_string : build_bin_string('$1'). +access_expr -> list_string : build_list_string('$1'). +access_expr -> bit_string : '$1'. +access_expr -> sigil : build_sigil('$1'). +access_expr -> max_expr : '$1'. + +%% Aliases and properly formed calls. Used by map_expr. +max_expr -> atom : ?exprs('$1'). +max_expr -> atom_safe : build_quoted_atom('$1', true). +max_expr -> atom_unsafe : build_quoted_atom('$1', false). +max_expr -> parens_call call_args_parens : build_identifier('$1', '$2'). +max_expr -> parens_call call_args_parens call_args_parens : build_nested_parens('$1', '$2', '$3'). +max_expr -> dot_alias : '$1'. + +bracket_arg -> open_bracket kw close_bracket : build_list('$1', '$2'). +bracket_arg -> open_bracket container_expr close_bracket : build_list('$1', '$2'). +bracket_arg -> open_bracket container_expr ',' close_bracket : build_list('$1', '$2'). + +bracket_expr -> dot_bracket_identifier bracket_arg : build_access(build_identifier('$1', nil), '$2'). +bracket_expr -> access_expr bracket_arg : build_access('$1', '$2'). + +bracket_at_expr -> at_op_eol dot_bracket_identifier bracket_arg : + build_access(build_unary_op('$1', build_identifier('$2', nil)), '$3'). +bracket_at_expr -> at_op_eol access_expr bracket_arg : + build_access(build_unary_op('$1', '$2'), '$3'). + +%% Blocks + +do_block -> do_eoe 'end' : [[{do, nil}]]. +do_block -> do_eoe stab end_eoe : [[{do, build_stab(reverse('$2'))}]]. +do_block -> do_eoe block_list 'end' : [[{do, nil}|'$2']]. +do_block -> do_eoe stab_eoe block_list 'end' : [[{do, build_stab(reverse('$2'))}|'$3']]. + +eoe -> eol : '$1'. +eoe -> ';' : '$1'. +eoe -> eol ';' : '$1'. + +fn_eoe -> 'fn' : '$1'. +fn_eoe -> 'fn' eoe : '$1'. + +do_eoe -> 'do' : '$1'. +do_eoe -> 'do' eoe : '$1'. + +end_eoe -> 'end' : '$1'. +end_eoe -> eoe 'end' : '$2'. + +block_eoe -> block_identifier : '$1'. +block_eoe -> block_identifier eoe : '$1'. + +stab -> stab_expr : ['$1']. +stab -> stab eoe stab_expr : ['$3'|'$1']. + +stab_eoe -> stab : '$1'. +stab_eoe -> stab eoe : '$1'. + +%% Here, `element(1, Token)` is the stab operator, +%% while `element(2, Token)` is the expression. +stab_expr -> expr : + '$1'. +stab_expr -> stab_op_eol_and_expr : + build_op(element(1, '$1'), [], element(2, '$1')). +stab_expr -> empty_paren stab_op_eol_and_expr : + build_op(element(1, '$2'), [], element(2, '$2')). +stab_expr -> call_args_no_parens_all stab_op_eol_and_expr : + build_op(element(1, '$2'), unwrap_when(unwrap_splice('$1')), element(2, '$2')). 
+stab_expr -> stab_parens_many stab_op_eol_and_expr : + build_op(element(1, '$2'), unwrap_splice('$1'), element(2, '$2')). +stab_expr -> stab_parens_many when_op expr stab_op_eol_and_expr : + build_op(element(1, '$4'), [{'when', meta_from_token('$2'), unwrap_splice('$1') ++ ['$3']}], element(2, '$4')). + +stab_op_eol_and_expr -> stab_op_eol expr : {'$1', '$2'}. +stab_op_eol_and_expr -> stab_op_eol : warn_empty_stab_clause('$1'), {'$1', nil}. + +block_item -> block_eoe stab_eoe : {?exprs('$1'), build_stab(reverse('$2'))}. +block_item -> block_eoe : {?exprs('$1'), nil}. + +block_list -> block_item : ['$1']. +block_list -> block_item block_list : ['$1'|'$2']. + +%% Helpers + +open_paren -> '(' : '$1'. +open_paren -> '(' eol : '$1'. +close_paren -> ')' : '$1'. +close_paren -> eol ')' : '$2'. + +empty_paren -> open_paren ')' : '$1'. + +open_bracket -> '[' : '$1'. +open_bracket -> '[' eol : '$1'. +close_bracket -> ']' : '$1'. +close_bracket -> eol ']' : '$2'. + +open_bit -> '<<' : '$1'. +open_bit -> '<<' eol : '$1'. +close_bit -> '>>' : '$1'. +close_bit -> eol '>>' : '$2'. + +open_curly -> '{' : '$1'. +open_curly -> '{' eol : '$1'. +close_curly -> '}' : '$1'. +close_curly -> eol '}' : '$2'. + +% Operators + +add_op_eol -> add_op : '$1'. +add_op_eol -> add_op eol : '$1'. +add_op_eol -> dual_op : '$1'. +add_op_eol -> dual_op eol : '$1'. + +mult_op_eol -> mult_op : '$1'. +mult_op_eol -> mult_op eol : '$1'. + +two_op_eol -> two_op : '$1'. +two_op_eol -> two_op eol : '$1'. + +three_op_eol -> three_op : '$1'. +three_op_eol -> three_op eol : '$1'. + +pipe_op_eol -> pipe_op : '$1'. +pipe_op_eol -> pipe_op eol : '$1'. + +capture_op_eol -> capture_op : '$1'. +capture_op_eol -> capture_op eol : '$1'. + +unary_op_eol -> unary_op : '$1'. +unary_op_eol -> unary_op eol : '$1'. +unary_op_eol -> dual_op : '$1'. +unary_op_eol -> dual_op eol : '$1'. + +match_op_eol -> match_op : '$1'. +match_op_eol -> match_op eol : '$1'. + +and_op_eol -> and_op : '$1'. +and_op_eol -> and_op eol : '$1'. + +or_op_eol -> or_op : '$1'. +or_op_eol -> or_op eol : '$1'. + +in_op_eol -> in_op : '$1'. +in_op_eol -> in_op eol : '$1'. + +in_match_op_eol -> in_match_op : '$1'. +in_match_op_eol -> in_match_op eol : '$1'. + +type_op_eol -> type_op : '$1'. +type_op_eol -> type_op eol : '$1'. + +when_op_eol -> when_op : '$1'. +when_op_eol -> when_op eol : '$1'. + +stab_op_eol -> stab_op : '$1'. +stab_op_eol -> stab_op eol : '$1'. + +at_op_eol -> at_op : '$1'. +at_op_eol -> at_op eol : '$1'. + +comp_op_eol -> comp_op : '$1'. +comp_op_eol -> comp_op eol : '$1'. + +rel_op_eol -> rel_op : '$1'. +rel_op_eol -> rel_op eol : '$1'. + +arrow_op_eol -> arrow_op : '$1'. +arrow_op_eol -> arrow_op eol : '$1'. + +% Dot operator + +dot_op -> '.' : '$1'. +dot_op -> '.' eol : '$1'. + +dot_identifier -> identifier : '$1'. +dot_identifier -> matched_expr dot_op identifier : build_dot('$2', '$1', '$3'). + +dot_alias -> aliases : {'__aliases__', meta_from_token('$1', 0), ?exprs('$1')}. +dot_alias -> matched_expr dot_op aliases : build_dot_alias('$2', '$1', '$3'). +dot_alias -> matched_expr dot_op dot_alias_container : build_dot_container('$2', '$1', '$3'). + +dot_alias_container -> open_curly '}' : []. +dot_alias_container -> open_curly container_args close_curly : '$2'. + +dot_op_identifier -> op_identifier : '$1'. +dot_op_identifier -> matched_expr dot_op op_identifier : build_dot('$2', '$1', '$3'). + +dot_do_identifier -> do_identifier : '$1'. +dot_do_identifier -> matched_expr dot_op do_identifier : build_dot('$2', '$1', '$3'). 
+ +dot_bracket_identifier -> bracket_identifier : '$1'. +dot_bracket_identifier -> matched_expr dot_op bracket_identifier : build_dot('$2', '$1', '$3'). + +dot_paren_identifier -> paren_identifier : '$1'. +dot_paren_identifier -> matched_expr dot_op paren_identifier : build_dot('$2', '$1', '$3'). + +parens_call -> dot_paren_identifier : '$1'. +parens_call -> matched_expr dot_call_op : {'.', meta_from_token('$2'), ['$1']}. % Fun/local calls + +% Function calls with no parentheses + +call_args_no_parens_expr -> matched_expr : '$1'. +call_args_no_parens_expr -> no_parens_expr : throw_no_parens_many_strict('$1'). + +call_args_no_parens_comma_expr -> matched_expr ',' call_args_no_parens_expr : ['$3', '$1']. +call_args_no_parens_comma_expr -> call_args_no_parens_comma_expr ',' call_args_no_parens_expr : ['$3'|'$1']. + +call_args_no_parens_all -> call_args_no_parens_one : '$1'. +call_args_no_parens_all -> call_args_no_parens_ambig : '$1'. +call_args_no_parens_all -> call_args_no_parens_many : '$1'. + +call_args_no_parens_one -> call_args_no_parens_kw : ['$1']. +call_args_no_parens_one -> matched_expr : ['$1']. + +call_args_no_parens_ambig -> no_parens_expr : ['$1']. + +call_args_no_parens_many -> matched_expr ',' call_args_no_parens_kw : ['$1', '$3']. +call_args_no_parens_many -> call_args_no_parens_comma_expr : reverse('$1'). +call_args_no_parens_many -> call_args_no_parens_comma_expr ',' call_args_no_parens_kw : reverse(['$3'|'$1']). + +call_args_no_parens_many_strict -> call_args_no_parens_many : '$1'. +call_args_no_parens_many_strict -> open_paren call_args_no_parens_kw close_paren : throw_no_parens_strict('$1'). +call_args_no_parens_many_strict -> open_paren call_args_no_parens_many close_paren : throw_no_parens_strict('$1'). + +stab_parens_many -> open_paren call_args_no_parens_kw close_paren : ['$2']. +stab_parens_many -> open_paren call_args_no_parens_many close_paren : '$2'. + +% Containers + +container_expr -> matched_expr : '$1'. +container_expr -> unmatched_expr : '$1'. +container_expr -> no_parens_expr : throw_no_parens_container_strict('$1'). + +container_args_base -> container_expr : ['$1']. +container_args_base -> container_args_base ',' container_expr : ['$3'|'$1']. + +container_args -> container_args_base : lists:reverse('$1'). +container_args -> container_args_base ',' : lists:reverse('$1'). +container_args -> container_args_base ',' kw : lists:reverse(['$3'|'$1']). + +% Function calls with parentheses + +call_args_parens_expr -> matched_expr : '$1'. +call_args_parens_expr -> unmatched_expr : '$1'. +call_args_parens_expr -> no_parens_expr : throw_no_parens_many_strict('$1'). + +call_args_parens_base -> call_args_parens_expr : ['$1']. +call_args_parens_base -> call_args_parens_base ',' call_args_parens_expr : ['$3'|'$1']. + +call_args_parens -> empty_paren : []. +call_args_parens -> open_paren no_parens_expr close_paren : ['$2']. +call_args_parens -> open_paren kw close_paren : ['$2']. +call_args_parens -> open_paren call_args_parens_base close_paren : reverse('$2'). +call_args_parens -> open_paren call_args_parens_base ',' kw close_paren : reverse(['$4'|'$2']). + +% KV + +kw_eol -> kw_identifier : ?exprs('$1'). +kw_eol -> kw_identifier eol : ?exprs('$1'). +kw_eol -> kw_identifier_safe : build_quoted_atom('$1', true). +kw_eol -> kw_identifier_safe eol : build_quoted_atom('$1', true). +kw_eol -> kw_identifier_unsafe : build_quoted_atom('$1', false). +kw_eol -> kw_identifier_unsafe eol : build_quoted_atom('$1', false). + +kw_base -> kw_eol container_expr : [{'$1', '$2'}]. 
+kw_base -> kw_base ',' kw_eol container_expr : [{'$3', '$4'}|'$1']. + +kw -> kw_base : reverse('$1'). +kw -> kw_base ',' : reverse('$1'). + +call_args_no_parens_kw_expr -> kw_eol matched_expr : {'$1', '$2'}. +call_args_no_parens_kw_expr -> kw_eol no_parens_expr : {'$1', '$2'}. + +call_args_no_parens_kw -> call_args_no_parens_kw_expr : ['$1']. +call_args_no_parens_kw -> call_args_no_parens_kw_expr ',' call_args_no_parens_kw : ['$1'|'$3']. + +% Lists + +list_args -> kw : '$1'. +list_args -> container_args_base : reverse('$1'). +list_args -> container_args_base ',' : reverse('$1'). +list_args -> container_args_base ',' kw : reverse('$1', '$3'). + +list -> open_bracket ']' : build_list('$1', []). +list -> open_bracket list_args close_bracket : build_list('$1', '$2'). + +% Tuple + +tuple -> open_curly '}' : build_tuple('$1', []). +tuple -> open_curly container_args close_curly : build_tuple('$1', '$2'). + +% Bitstrings + +bit_string -> open_bit '>>' : build_bit('$1', []). +bit_string -> open_bit container_args close_bit : build_bit('$1', '$2'). + +% Map and structs + +%% Allow unquote/@something/aliases inside maps and structs. +map_expr -> max_expr : '$1'. +map_expr -> dot_identifier : build_identifier('$1', nil). +map_expr -> at_op_eol map_expr : build_unary_op('$1', '$2'). + +assoc_op_eol -> assoc_op : '$1'. +assoc_op_eol -> assoc_op eol : '$1'. + +assoc_expr -> matched_expr assoc_op_eol matched_expr : {'$1', '$3'}. +assoc_expr -> unmatched_expr assoc_op_eol unmatched_expr : {'$1', '$3'}. +assoc_expr -> matched_expr assoc_op_eol unmatched_expr : {'$1', '$3'}. +assoc_expr -> unmatched_expr assoc_op_eol matched_expr : {'$1', '$3'}. +assoc_expr -> map_expr : '$1'. + +assoc_update -> matched_expr pipe_op_eol assoc_expr : {'$2', '$1', ['$3']}. +assoc_update -> unmatched_expr pipe_op_eol assoc_expr : {'$2', '$1', ['$3']}. + +assoc_update_kw -> matched_expr pipe_op_eol kw : {'$2', '$1', '$3'}. +assoc_update_kw -> unmatched_expr pipe_op_eol kw : {'$2', '$1', '$3'}. + +assoc_base -> assoc_expr : ['$1']. +assoc_base -> assoc_base ',' assoc_expr : ['$3'|'$1']. + +assoc -> assoc_base : reverse('$1'). +assoc -> assoc_base ',' : reverse('$1'). + +map_op -> '%{}' : '$1'. +map_op -> '%{}' eol : '$1'. + +map_close -> kw close_curly : '$1'. +map_close -> assoc close_curly : '$1'. +map_close -> assoc_base ',' kw close_curly : reverse('$1', '$3'). + +map_args -> open_curly '}' : build_map('$1', []). +map_args -> open_curly map_close : build_map('$1', '$2'). +map_args -> open_curly assoc_update close_curly : build_map_update('$1', '$2', []). +map_args -> open_curly assoc_update ',' close_curly : build_map_update('$1', '$2', []). +map_args -> open_curly assoc_update ',' map_close : build_map_update('$1', '$2', '$4'). +map_args -> open_curly assoc_update_kw close_curly : build_map_update('$1', '$2', []). + +struct_op -> '%' : '$1'. + +map -> map_op map_args : '$2'. +map -> struct_op map_expr map_args : {'%', meta_from_token('$1'), ['$2', '$3']}. +map -> struct_op map_expr eol map_args : {'%', meta_from_token('$1'), ['$2', '$4']}. + +Erlang code. + +-define(file(), get(elixir_parser_file)). +-define(id(Token), element(1, Token)). +-define(location(Token), element(2, Token)). +-define(exprs(Token), element(3, Token)). +-define(meta(Node), element(2, Node)). +-define(rearrange_uop(Op), (Op == 'not' orelse Op == '!')). + +%% The following directive is needed for (significantly) faster +%% compilation of the generated .erl file by the HiPE compiler +-compile([{hipe, [{regalloc, linear_scan}]}]). 
+-import(lists, [reverse/1, reverse/2]). + +meta_from_token(Token, Counter) -> [{counter, Counter}|meta_from_token(Token)]. +meta_from_token(Token) -> meta_from_location(?location(Token)). + +meta_from_location({Line, Column, EndColumn}) + when is_integer(Line), is_integer(Column), is_integer(EndColumn) -> [{line, Line}]. + +%% Operators + +build_op({_Kind, Location, 'in'}, {UOp, _, [Left]}, Right) when ?rearrange_uop(UOp) -> + {UOp, meta_from_location(Location), [{'in', meta_from_location(Location), [Left, Right]}]}; + +build_op({_Kind, Location, Op}, Left, Right) -> + {Op, meta_from_location(Location), [Left, Right]}. + +build_unary_op({_Kind, Location, Op}, Expr) -> + {Op, meta_from_location(Location), [Expr]}. + +build_list(Marker, Args) -> + {Args, ?location(Marker)}. + +build_tuple(_Marker, [Left, Right]) -> + {Left, Right}; +build_tuple(Marker, Args) -> + {'{}', meta_from_token(Marker), Args}. + +build_bit(Marker, Args) -> + {'<<>>', meta_from_token(Marker), Args}. + +build_map(Marker, Args) -> + {'%{}', meta_from_token(Marker), Args}. + +build_map_update(Marker, {Pipe, Left, Right}, Extra) -> + {'%{}', meta_from_token(Marker), [build_op(Pipe, Left, Right ++ Extra)]}. + +%% Blocks + +build_block([{Op, _, [_]}]=Exprs) when ?rearrange_uop(Op) -> {'__block__', [], Exprs}; +build_block([{unquote_splicing, _, Args}]=Exprs) when + length(Args) =< 2 -> {'__block__', [], Exprs}; +build_block([Expr]) -> Expr; +build_block(Exprs) -> {'__block__', [], Exprs}. + +%% Dots + +build_dot_alias(Dot, {'__aliases__', _, Left}, {'aliases', _, Right}) -> + {'__aliases__', meta_from_token(Dot), Left ++ Right}; + +build_dot_alias(_Dot, Atom, {'aliases', _, _} = Token) when is_atom(Atom) -> + throw_bad_atom(Token); + +build_dot_alias(Dot, Other, {'aliases', _, Right}) -> + {'__aliases__', meta_from_token(Dot), [Other|Right]}. + +build_dot_container(Dot, Left, Right) -> + Meta = meta_from_token(Dot), + {{'.', Meta, [Left, '{}']}, Meta, Right}. + +build_dot(Dot, Left, Right) -> + {'.', meta_from_token(Dot), [Left, extract_identifier(Right)]}. + +extract_identifier({Kind, _, Identifier}) when + Kind == identifier; Kind == bracket_identifier; Kind == paren_identifier; + Kind == do_identifier; Kind == op_identifier -> + Identifier. + +%% Identifiers + +build_nested_parens(Dot, Args1, Args2) -> + Identifier = build_identifier(Dot, Args1), + Meta = ?meta(Identifier), + {Identifier, Meta, Args2}. + +build_identifier({'.', Meta, _} = Dot, Args) -> + FArgs = case Args of + nil -> []; + _ -> Args + end, + {Dot, Meta, FArgs}; + +build_identifier({op_identifier, Location, Identifier}, [Arg]) -> + {Identifier, [{ambiguous_op, nil}|meta_from_location(Location)], [Arg]}; + +build_identifier({_, Location, Identifier}, Args) -> + {Identifier, meta_from_location(Location), Args}. + +%% Fn + +build_fn(Op, [{'->', _, [_, _]}|_] = Stab) -> + {fn, meta_from_token(Op), build_stab(Stab)}; +build_fn(Op, _Stab) -> + throw(meta_from_token(Op), "expected clauses to be defined with -> inside: ", "'fn'"). + +%% Access + +build_access(Expr, {List, Location}) -> + Meta = meta_from_location(Location), + {{'.', Meta, ['Elixir.Access', get]}, Meta, [Expr, List]}. + +%% Interpolation aware + +build_sigil({sigil, Location, Sigil, Parts, Modifiers}) -> + Meta = meta_from_location(Location), + {list_to_atom("sigil_" ++ [Sigil]), Meta, [{'<<>>', Meta, string_parts(Parts)}, Modifiers]}. 
+ +build_bin_string({bin_string, _Location, [H]}) when is_binary(H) -> + H; +build_bin_string({bin_string, Location, Args}) -> + {'<<>>', meta_from_location(Location), string_parts(Args)}. + +build_list_string({list_string, _Location, [H]}) when is_binary(H) -> + elixir_utils:characters_to_list(H); +build_list_string({list_string, Location, Args}) -> + Meta = meta_from_location(Location), + {{'.', Meta, ['Elixir.String', to_char_list]}, Meta, [{'<<>>', Meta, string_parts(Args)}]}. + +build_quoted_atom({_, _Location, [H]}, Safe) when is_binary(H) -> + Op = binary_to_atom_op(Safe), erlang:Op(H, utf8); +build_quoted_atom({_, Location, Args}, Safe) -> + Meta = meta_from_location(Location), + {{'.', Meta, [erlang, binary_to_atom_op(Safe)]}, Meta, [{'<<>>', Meta, string_parts(Args)}, utf8]}. + +binary_to_atom_op(true) -> binary_to_existing_atom; +binary_to_atom_op(false) -> binary_to_atom. + +string_parts(Parts) -> + [string_part(Part) || Part <- Parts]. +string_part(Binary) when is_binary(Binary) -> + Binary; +string_part({Location, Tokens}) -> + Form = string_tokens_parse(Tokens), + Meta = meta_from_location(Location), + {'::', Meta, [{{'.', Meta, ['Elixir.Kernel', to_string]}, Meta, [Form]}, {binary, Meta, nil}]}. + +string_tokens_parse(Tokens) -> + case parse(Tokens) of + {ok, Forms} -> Forms; + {error, _} = Error -> throw(Error) + end. + +%% Keywords + +build_stab([{'->', Meta, [Left, Right]}|T]) -> + build_stab(Meta, T, Left, [Right], []); + +build_stab(Else) -> + build_block(Else). + +build_stab(Old, [{'->', New, [Left, Right]}|T], Marker, Temp, Acc) -> + H = {'->', Old, [Marker, build_block(reverse(Temp))]}, + build_stab(New, T, Left, [Right], [H|Acc]); + +build_stab(Meta, [H|T], Marker, Temp, Acc) -> + build_stab(Meta, T, Marker, [H|Temp], Acc); + +build_stab(Meta, [], Marker, Temp, Acc) -> + H = {'->', Meta, [Marker, build_block(reverse(Temp))]}, + reverse([H|Acc]). + +%% Every time the parser sees a (unquote_splicing()) +%% it assumes that a block is being spliced, wrapping +%% the splicing in a __block__. But in the stab clause, +%% we can have (unquote_splicing(1, 2, 3)) -> :ok, in such +%% case, we don't actually want the block, since it is +%% an arg style call. unwrap_splice unwraps the splice +%% from such blocks. +unwrap_splice([{'__block__', [], [{unquote_splicing, _, _}] = Splice}]) -> + Splice; + +unwrap_splice(Other) -> Other. + +unwrap_when(Args) -> + case elixir_utils:split_last(Args) of + {Start, {'when', Meta, [_, _] = End}} -> + [{'when', Meta, Start ++ End}]; + {_, _} -> + Args + end. + +to_block([One]) -> One; +to_block(Other) -> {'__block__', [], reverse(Other)}. + +%% Warnings and errors + +throw(Meta, Error, Token) -> + Line = + case lists:keyfind(line, 1, Meta) of + {line, L} -> L; + false -> 0 + end, + throw({error, {Line, ?MODULE, [Error, Token]}}). + +throw_bad_atom(Token) -> + throw(meta_from_token(Token), "atom cannot be followed by an alias. If the '.' was meant to be " + "part of the atom's name, the atom name must be quoted. Syntax error before: ", "'.'"). + +throw_no_parens_strict(Token) -> + throw(meta_from_token(Token), "unexpected parentheses. If you are making a " + "function call, do not insert spaces between the function name and the " + "opening parentheses. Syntax error before: ", "'('"). + +throw_no_parens_many_strict(Node) -> + throw(?meta(Node), + "unexpected comma. Parentheses are required to solve ambiguity in nested calls.\n\n" + "This error happens when you have nested function calls without parentheses. 
" + "For example:\n\n" + " one a, two b, c, d\n\n" + "In the example above, we don't know if the parameters \"c\" and \"d\" apply " + "to the function \"one\" or \"two\". You can solve this by explicitly adding " + "parentheses:\n\n" + " one a, two(b, c, d)\n\n" + "Elixir cannot compile otherwise. Syntax error before: ", "','"). + +throw_no_parens_container_strict(Node) -> + throw(?meta(Node), + "unexpected comma. Parentheses are required to solve ambiguity inside containers.\n\n" + "This error may happen when you forget a comma in a list or other container:\n\n" + " [a, b c, d]\n\n" + "Or when you have ambiguous calls:\n\n" + " [one, two three, four, five]\n\n" + "In the example above, we don't know if the parameters \"four\" and \"five\" " + "belongs to the list or the function \"two\". You can solve this by explicitly " + "adding parentheses:\n\n" + " [one, two(three, four), five]\n\n" + "Elixir cannot compile otherwise. Syntax error before: ", "','"). + +throw_invalid_kw_identifier({_, _, do} = Token) -> + throw(meta_from_token(Token), elixir_tokenizer:invalid_do_error("unexpected keyword \"do:\""), "'do:'"); +throw_invalid_kw_identifier({_, _, KW} = Token) -> + throw(meta_from_token(Token), "syntax error before: ", "'" ++ atom_to_list(KW) ++ "':"). + +%% TODO: Make those warnings errors. +warn_empty_stab_clause({stab_op, {Line, _Begin, _End}, '->'}) -> + elixir_errors:warn(Line, ?file(), + "an expression is always required on the right side of ->. " + "Please provide a value after ->"). + +warn_pipe({arrow_op, {Line, _Begin, _End}, Op}, {_, [_|_], [_|_]}) -> + elixir_errors:warn(Line, ?file(), + io_lib:format( + "you are piping into a function call without parentheses, which may be ambiguous. " + "Please wrap the function you are piping into in parentheses. For example:\n\n" + " foo 1 ~ts bar 2 ~ts baz 3\n\n" + "Should be written as:\n\n" + " foo(1) ~ts bar(2) ~ts baz(3)\n", + [Op, Op, Op, Op] + ) + ); +warn_pipe(_Token, _) -> + ok. diff --git a/samples/Erlang/lfe_scan.xrl b/samples/Erlang/lfe_scan.xrl new file mode 100644 index 00000000..72bb1b22 --- /dev/null +++ b/samples/Erlang/lfe_scan.xrl @@ -0,0 +1,256 @@ +%% Copyright (c) 2008-2013 Robert Virding +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. + +%% File : lfe_scan.xrl +%% Author : Robert Virding +%% Purpose : Token definitions for Lisp Flavoured Erlang. + +Definitions. +B = [01] +O = [0-7] +D = [0-9] +H = [0-9a-fA-F] +B36 = [0-9a-zA-Z] +U = [A-Z] +L = [a-z] +A = ({U}|{L}) +DEL = [][()}{";\000-\s] +SYM = [^][()}{";\000-\s\177-\237] +SSYM = [^][()}{"|;#`',\000-\s\177-\237] +WS = ([\000-\s]|;[^\n]*) + +Rules. +%% Bracketed Comments using #| foo |# +#{D}*\|[^\|]*\|+([^#\|][^\|]*\|+)*# : + block_comment(string:substr(TokenChars, 3)). + +%% Separators +' : {token,{'\'',TokenLine}}. +` : {token,{'`',TokenLine}}. +, : {token,{',',TokenLine}}. +,@ : {token,{',@',TokenLine}}. +\. : {token,{'.',TokenLine}}. +[][()}{] : {token,{list_to_atom(TokenChars),TokenLine}}. + +#{D}*[bB]\( : {token,{'#B(',TokenLine}}. 
+#{D}*[mM]\( : {token,{'#M(',TokenLine}}. +#{D}*\( : {token,{'#(',TokenLine}}. +#{D}*\. : {token,{'#.',TokenLine}}. + +#{D}*` : {token,{'#`',TokenLine}}. +#{D}*; : {token,{'#;',TokenLine}}. +#{D}*, : {token,{'#,',TokenLine}}. +#{D}*,@ : {token,{'#,@',TokenLine}}. + +%% Characters +#{D}*\\(x{H}+|.) : char_token(skip_past(TokenChars, $\\, $\\), TokenLine). + +%% Based numbers +#{D}*\*{SYM}+ : base_token(skip_past(TokenChars, $*, $*), 2, TokenLine). +#{D}*[bB]{SYM}+ : base_token(skip_past(TokenChars, $b, $B), 2, TokenLine). +#{D}*[oO]{SYM}+ : base_token(skip_past(TokenChars, $o, $O), 8, TokenLine). +#{D}*[dD]{SYM}+ : base_token(skip_past(TokenChars, $d, $D), 10, TokenLine). +#{D}*[xX]{SYM}+ : base_token(skip_past(TokenChars, $x, $X), 16, TokenLine). +#{D}*[rR]{SYM}+ : + %% Scan over digit chars to get base. + {Base,[_|Ds]} = base1(tl(TokenChars), 10, 0), + base_token(Ds, Base, TokenLine). + +%% String +"(\\x{H}+;|\\.|[^"\\])*" : + %% Strip quotes. + S = string:substr(TokenChars, 2, TokenLen - 2), + {token,{string,TokenLine,chars(S)}}. +%% Binary string +#"(\\x{H}+;|\\.|[^"\\])*" : + %% Strip quotes. + S = string:substr(TokenChars, 3, TokenLen - 3), + Bin = unicode:characters_to_binary(chars(S), utf8, utf8), + {token,{binary,TokenLine,Bin}}. +%% Symbols +\|(\\x{H}+;|\\.|[^|\\])*\| : + %% Strip quotes. + S = string:substr(TokenChars, 2, TokenLen - 2), + symbol_token(chars(S), TokenLine). +%% Funs +#'{SSYM}{SYM}*/{D}+ : + %% Strip sharpsign single-quote. + FunStr = string:substr(TokenChars,3), + {token,{'#\'',TokenLine,FunStr}}. +%% Atoms +[+-]?{D}+ : + case catch {ok,list_to_integer(TokenChars)} of + {ok,I} -> {token,{number,TokenLine,I}}; + _ -> {error,"illegal integer"} + end. +[+-]?{D}+\.{D}+([eE][+-]?{D}+)? : + case catch {ok,list_to_float(TokenChars)} of + {ok,F} -> {token,{number,TokenLine,F}}; + _ -> {error,"illegal float"} + end. +{SSYM}{SYM}* : + symbol_token(TokenChars, TokenLine). +{WS}+ : skip_token. + +Erlang code. +%% Copyright (c) 2008-2013 Robert Virding +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. + +%% File : lfe_scan.erl +%% Author : Robert Virding +%% Purpose : Token definitions for Lisp Flavoured Erlang. + +-export([start_symbol_char/1,symbol_char/1]). + +-import(string, [substr/2,substr/3]). + +%% start_symbol_char(Char) -> true | false. +%% symbol_char(Char) -> true | false. +%% Define start symbol chars and symbol chars. + +start_symbol_char($#) -> false; +start_symbol_char($`) -> false; +start_symbol_char($') -> false; %' +start_symbol_char($,) -> false; +start_symbol_char($|) -> false; %Symbol quote character +start_symbol_char(C) -> symbol_char(C). + +symbol_char($() -> false; +symbol_char($)) -> false; +symbol_char($[) -> false; +symbol_char($]) -> false; +symbol_char(${) -> false; +symbol_char($}) -> false; +symbol_char($") -> false; +symbol_char($;) -> false; +symbol_char(C) -> ((C > $\s) and (C =< $~)) orelse (C > $\240). + +%% symbol_token(Chars, Line) -> {token,{symbol,Line,Symbol}} | {error,E}. 
+%% Build a symbol from list of legal characters, else error. + +symbol_token(Cs, L) -> + case catch {ok,list_to_atom(Cs)} of + {ok,S} -> {token,{symbol,L,S}}; + _ -> {error,"illegal symbol"} + end. + +%% base_token(Chars, Base, Line) -> Integer. +%% Convert a string of Base characters into a number. We only allow +%% base betqeen 2 and 36, and an optional sign character first. + +base_token(_, B, _) when B < 2; B > 36 -> + {error,"illegal number base"}; +base_token([$+|Cs], B, L) -> base_token(Cs, B, +1, L); +base_token([$-|Cs], B, L) -> base_token(Cs, B, -1, L); +base_token(Cs, B, L) -> base_token(Cs, B, +1, L). + +base_token(Cs, B, S, L) -> + case base1(Cs, B, 0) of + {N,[]} -> {token,{number,L,S*N}}; + {_,_} -> {error,"illegal based number"} + end. + +base1([C|Cs], Base, SoFar) when C >= $0, C =< $9, C < Base + $0 -> + Next = SoFar * Base + (C - $0), + base1(Cs, Base, Next); +base1([C|Cs], Base, SoFar) when C >= $a, C =< $z, C < Base + $a - 10 -> + Next = SoFar * Base + (C - $a + 10), + base1(Cs, Base, Next); +base1([C|Cs], Base, SoFar) when C >= $A, C =< $Z, C < Base + $A - 10 -> + Next = SoFar * Base + (C - $A + 10), + base1(Cs, Base, Next); +base1([C|Cs], _Base, SoFar) -> {SoFar,[C|Cs]}; +base1([], _Base, N) -> {N,[]}. + +-define(IS_UNICODE(C), ((C >= 0) and (C =< 16#10FFFF))). + +%% char_token(InputChars, Line) -> {token,{number,L,N}} | {error,E}. +%% Convert an input string into the corresponding character. For a +%% sequence of hex characters we check resultant is code is in the +%% unicode range. + +char_token([$x,C|Cs], L) -> + case base1([C|Cs], 16, 0) of + {N,[]} when ?IS_UNICODE(N) -> {token,{number,L,N}}; + _ -> {error,"illegal character"} + end; +char_token([C], L) -> {token,{number,L,C}}. + +%% chars(InputChars) -> Chars. +%% Convert an input string into the corresponding string characters. +%% We know that the input string is correct. + +chars([$\\,$x,C|Cs0]) -> + case hex_char(C) of + true -> + case base1([C|Cs0], 16, 0) of + {N,[$;|Cs1]} -> [N|chars(Cs1)]; + _Other -> [escape_char($x)|chars([C|Cs0])] + end; + false -> [escape_char($x)|chars([C|Cs0])] + end; +chars([$\\,C|Cs]) -> [escape_char(C)|chars(Cs)]; +chars([C|Cs]) -> [C|chars(Cs)]; +chars([]) -> []. + +hex_char(C) when C >= $0, C =< $9 -> true; +hex_char(C) when C >= $a, C =< $f -> true; +hex_char(C) when C >= $A, C =< $F -> true; +hex_char(_) -> false. + +escape_char($b) -> $\b; %\b = BS +escape_char($t) -> $\t; %\t = TAB +escape_char($n) -> $\n; %\n = LF +escape_char($v) -> $\v; %\v = VT +escape_char($f) -> $\f; %\f = FF +escape_char($r) -> $\r; %\r = CR +escape_char($e) -> $\e; %\e = ESC +escape_char($s) -> $\s; %\s = SPC +escape_char($d) -> $\d; %\d = DEL +escape_char(C) -> C. + +%% Block Comment: +%% Provide a sensible error when people attempt to include nested +%% comments because currently the parser cannot process them without +%% a rebuild. But simply exploding on a '#|' is not going to be that +%% helpful. + +block_comment(TokenChars) -> + %% Check we're not opening another comment block. + case string:str(TokenChars, "#|") of + 0 -> skip_token; %% No nesting found + _ -> {error, "illegal nested block comment"} + end. + +%% skip_until(String, Char1, Char2) -> String. +%% skip_past(String, Char1, Char2) -> String. + +%% skip_until([C|_]=Cs, C1, C2) when C =:= C1 ; C =:= C2 -> Cs; +%% skip_until([_|Cs], C1, C2) -> skip_until(Cs, C1, C2); +%% skip_until([], _, _) -> []. 
+ +skip_past([C|Cs], C1, C2) when C =:= C1 ; C =:= C2 -> Cs; +skip_past([_|Cs], C1, C2) -> skip_past(Cs, C1, C2); +skip_past([], _, _) -> []. diff --git a/samples/Inform 7/Trivial Extension.i7x b/samples/Inform 7/Trivial Extension.i7x index 1aae1b85..40f53dab 100644 --- a/samples/Inform 7/Trivial Extension.i7x +++ b/samples/Inform 7/Trivial Extension.i7x @@ -1,6 +1,74 @@ -Version 1 of Trivial Extension by Andrew Plotkin begins here. +Version 2 of Trivial Extension by Andrew Plotkin begins here. + +"This is the rubric of the extension." + +"provided for the Linguist package by Andrew Plotkin" + +[Note the two special quoted lines above.] A cow is a kind of animal. A cow can be purple. +Understand "cow" as a cow. +Understand "purple" as a purple cow. + +Check pushing a cow: + instead say "Cow-tipping, at your age?[paragraph break]Inconceivable." + +[Here are the possible levels of heading:] + +Volume One + +Text-line is always "A line of text." + +Book 2 + + Part the third - indented headings still count + +Chapter IV - not for release + +[Heading labels are case-insensitive.] + +section foobar + +[A line beginning "Volume" that does not have blank lines before and after it is *not* a header line. So the following should all be part of section foobar. Sadly, the "Volume is..." line gets colored as a header, because Atom's regexp model can't recognize "thing with blank lines before and after"!] + +Measure is a kind of value. +Volume is a measure. Length is a measure. +Area is a measure. + +[And now some Inform 6 inclusions.] + +To say em -- running on: + (- style underline; -). +To say /em -- running on: + (- style roman; -). + +Include (- + +! Inform 6 comments start with a ! mark and run to the end of the line. +Global cowcount; + +[ inform6func arg; + print "Here is some text; ", (address) 'dictword', ".^"; + cowcount++; ! increment this variable +]; + +Object i6cow + with name 'cow' 'animal', + with description "It looks like a cow.", + has animate scenery; + +-) after "Global Variables" in "Output.i6t". + Trivial Extension ends here. +---- DOCUMENTATION ---- + +Everything after the "---- DOCUMENTATION ----" line is documentation, so it should have the comment style. + +However, tab-indented lines are sample Inform code within the documentation: + + Horns are a kind of thing. Every cow has horns. + say "Moo[if the noun is purple] indigo[end if]." + +So we need to allow for that. diff --git a/samples/Inform 7/story.ni b/samples/Inform 7/story.ni index f8873369..4fd774b5 100644 --- a/samples/Inform 7/story.ni +++ b/samples/Inform 7/story.ni @@ -2,11 +2,61 @@ Include Trivial Extension by Andrew Plotkin. +Volume 1 - overview + +Chapter - setting the scene + The Kitchen is a room. -[This kitchen is modelled after the one in Zork, although it lacks the detail to establish this to the player.] +[Comment: this kitchen is modelled after the one in Zork, although it lacks the detail to establish this to the player.] + +Section - the kitchen table + +The spicerack is a container in the Kitchen. + +Table of Spices +Name Flavor +"cinnamon" 5 +"nutmeg" 4 +"szechuan pepper" 8 + +The description of the spicerack is "It's mostly empty." + +Chapter - a character A purple cow called Gelett is in the Kitchen. +[This comment spans multiple lines.. + +...and this line contains [nested square[] brackets]... + +...which is legal in Inform 7.] + Instead of examining Gelett: say "You'd rather see than be one." + +Instead of examining Gelett: + say "You'd rather see than be one." 
+ +Check smelling Gelett: + say "This text contains several lines. + +A blank line is displayed as a paragraph break, +but a simple line break is not."; + stop the action. + +Section - cow catching + +Gelett has a number called the mooness. + +Instead of taking Gelett: + increment the mooness of Gelett; + if the mooness of Gelett is one: + say "Gelett moos once."; + else: + say "Gelett moos [mooness of Gelett in words] times."; + +Volume 2 - the turn cycle + +Every turn: + say "A turn passes[one of][or] placidly[or] idly[or] tediously[at random]." diff --git a/samples/Java/GrammarKit.java b/samples/Java/GrammarKit.java new file mode 100644 index 00000000..a5db6da8 --- /dev/null +++ b/samples/Java/GrammarKit.java @@ -0,0 +1,625 @@ +// This is a generated file. Not intended for manual editing. +package org.intellij.grammar.parser; + +import com.intellij.lang.PsiBuilder; +import com.intellij.lang.PsiBuilder.Marker; +import static org.intellij.grammar.psi.BnfTypes.*; +import static org.intellij.grammar.parser.GeneratedParserUtilBase.*; +import com.intellij.psi.tree.IElementType; +import com.intellij.lang.ASTNode; +import com.intellij.psi.tree.TokenSet; +import com.intellij.lang.PsiParser; +import com.intellij.lang.LightPsiParser; + +@SuppressWarnings({"SimplifiableIfStatement", "UnusedAssignment"}) +public class GrammarParser implements PsiParser, LightPsiParser { + + public ASTNode parse(IElementType t, PsiBuilder b) { + parseLight(t, b); + return b.getTreeBuilt(); + } + + public void parseLight(IElementType t, PsiBuilder b) { + boolean r; + b = adapt_builder_(t, b, this, EXTENDS_SETS_); + Marker m = enter_section_(b, 0, _COLLAPSE_, null); + if (t == BNF_ATTR) { + r = attr(b, 0); + } + else if (t == BNF_ATTR_PATTERN) { + r = attr_pattern(b, 0); + } + else if (t == BNF_ATTR_VALUE) { + r = attr_value(b, 0); + } + else if (t == BNF_ATTRS) { + r = attrs(b, 0); + } + else if (t == BNF_CHOICE) { + r = choice(b, 0); + } + else if (t == BNF_EXPRESSION) { + r = expression(b, 0); + } + else if (t == BNF_LITERAL_EXPRESSION) { + r = literal_expression(b, 0); + } + else if (t == BNF_MODIFIER) { + r = modifier(b, 0); + } + else if (t == BNF_PAREN_EXPRESSION) { + r = paren_expression(b, 0); + } + else if (t == BNF_PREDICATE) { + r = predicate(b, 0); + } + else if (t == BNF_PREDICATE_SIGN) { + r = predicate_sign(b, 0); + } + else if (t == BNF_QUANTIFIED) { + r = quantified(b, 0); + } + else if (t == BNF_QUANTIFIER) { + r = quantifier(b, 0); + } + else if (t == BNF_REFERENCE_OR_TOKEN) { + r = reference_or_token(b, 0); + } + else if (t == BNF_RULE) { + r = rule(b, 0); + } + else if (t == BNF_SEQUENCE) { + r = sequence(b, 0); + } + else if (t == BNF_STRING_LITERAL_EXPRESSION) { + r = string_literal_expression(b, 0); + } + else { + r = parse_root_(t, b, 0); + } + exit_section_(b, 0, m, t, r, true, TRUE_CONDITION); + } + + protected boolean parse_root_(IElementType t, PsiBuilder b, int l) { + return grammar(b, l + 1); + } + + public static final TokenSet[] EXTENDS_SETS_ = new TokenSet[] { + create_token_set_(BNF_LITERAL_EXPRESSION, BNF_STRING_LITERAL_EXPRESSION), + create_token_set_(BNF_CHOICE, BNF_EXPRESSION, BNF_LITERAL_EXPRESSION, BNF_PAREN_EXPRESSION, + BNF_PREDICATE, BNF_QUANTIFIED, BNF_REFERENCE_OR_TOKEN, BNF_SEQUENCE, + BNF_STRING_LITERAL_EXPRESSION), + }; + + /* ********************************************************** */ + // id attr_pattern? '=' attr_value ';'? 
+ public static boolean attr(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr")) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_ID); + p = r; // pin = 1 + r = r && report_error_(b, attr_1(b, l + 1)); + r = p && report_error_(b, consumeToken(b, BNF_OP_EQ)) && r; + r = p && report_error_(b, attr_value(b, l + 1)) && r; + r = p && attr_4(b, l + 1) && r; + exit_section_(b, l, m, BNF_ATTR, r, p, attr_recover_until_parser_); + return r || p; + } + + // attr_pattern? + private static boolean attr_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_1")) return false; + attr_pattern(b, l + 1); + return true; + } + + // ';'? + private static boolean attr_4(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_4")) return false; + consumeToken(b, BNF_SEMICOLON); + return true; + } + + /* ********************************************************** */ + // '(' string ')' + public static boolean attr_pattern(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_pattern")) return false; + if (!nextTokenIs(b, BNF_LEFT_PAREN)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_PAREN); + r = r && consumeToken(b, BNF_STRING); + r = r && consumeToken(b, BNF_RIGHT_PAREN); + exit_section_(b, m, BNF_ATTR_PATTERN, r); + return r; + } + + /* ********************************************************** */ + // !'}' + static boolean attr_recover_until(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_recover_until")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !consumeToken(b, BNF_RIGHT_BRACE); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // (reference_or_token | literal_expression) !'=' + public static boolean attr_value(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = attr_value_0(b, l + 1); + r = r && attr_value_1(b, l + 1); + exit_section_(b, l, m, BNF_ATTR_VALUE, r, false, null); + return r; + } + + // reference_or_token | literal_expression + private static boolean attr_value_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = reference_or_token(b, l + 1); + if (!r) r = literal_expression(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !'=' + private static boolean attr_value_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value_1")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !consumeToken(b, BNF_OP_EQ); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // '{' attr* '}' + public static boolean attrs(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attrs")) return false; + if (!nextTokenIs(b, BNF_LEFT_BRACE)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_LEFT_BRACE); + p = r; // pin = 1 + r = r && report_error_(b, attrs_1(b, l + 1)); + r = p && consumeToken(b, BNF_RIGHT_BRACE) && r; + exit_section_(b, l, m, BNF_ATTRS, r, p, null); + return r || p; + } + + // attr* + private static boolean attrs_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attrs_1")) return false; + int c = current_position_(b); + while (true) { + if (!attr(b, l + 1)) break; + if 
(!empty_element_parsed_guard_(b, "attrs_1", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // '{' sequence ('|' sequence)* '}' | sequence choice_tail* + public static boolean choice(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = choice_0(b, l + 1); + if (!r) r = choice_1(b, l + 1); + exit_section_(b, l, m, BNF_CHOICE, r, false, null); + return r; + } + + // '{' sequence ('|' sequence)* '}' + private static boolean choice_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_BRACE); + r = r && sequence(b, l + 1); + r = r && choice_0_2(b, l + 1); + r = r && consumeToken(b, BNF_RIGHT_BRACE); + exit_section_(b, m, null, r); + return r; + } + + // ('|' sequence)* + private static boolean choice_0_2(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0_2")) return false; + int c = current_position_(b); + while (true) { + if (!choice_0_2_0(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "choice_0_2", c)) break; + c = current_position_(b); + } + return true; + } + + // '|' sequence + private static boolean choice_0_2_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0_2_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_OP_OR); + r = r && sequence(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // sequence choice_tail* + private static boolean choice_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_1")) return false; + boolean r; + Marker m = enter_section_(b); + r = sequence(b, l + 1); + r = r && choice_1_1(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // choice_tail* + private static boolean choice_1_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_1_1")) return false; + int c = current_position_(b); + while (true) { + if (!choice_tail(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "choice_1_1", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // '|' sequence + static boolean choice_tail(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_tail")) return false; + if (!nextTokenIs(b, BNF_OP_OR)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_OP_OR); + p = r; // pin = 1 + r = r && sequence(b, l + 1); + exit_section_(b, l, m, null, r, p, null); + return r || p; + } + + /* ********************************************************** */ + // choice? 
+ public static boolean expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "expression")) return false; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + choice(b, l + 1); + exit_section_(b, l, m, BNF_EXPRESSION, true, false, null); + return true; + } + + /* ********************************************************** */ + // (attrs | rule) * + static boolean grammar(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "grammar")) return false; + int c = current_position_(b); + while (true) { + if (!grammar_0(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "grammar", c)) break; + c = current_position_(b); + } + return true; + } + + // attrs | rule + private static boolean grammar_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "grammar_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = attrs(b, l + 1); + if (!r) r = rule(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + /* ********************************************************** */ + // string_literal_expression | number + public static boolean literal_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "literal_expression")) return false; + if (!nextTokenIs(b, "", BNF_NUMBER, BNF_STRING)) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = string_literal_expression(b, l + 1); + if (!r) r = consumeToken(b, BNF_NUMBER); + exit_section_(b, l, m, BNF_LITERAL_EXPRESSION, r, false, null); + return r; + } + + /* ********************************************************** */ + // 'private' | 'external' | 'wrapped' + public static boolean modifier(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "modifier")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, "private"); + if (!r) r = consumeToken(b, "external"); + if (!r) r = consumeToken(b, "wrapped"); + exit_section_(b, l, m, BNF_MODIFIER, r, false, null); + return r; + } + + /* ********************************************************** */ + // quantified | predicate + static boolean option(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "option")) return false; + boolean r; + Marker m = enter_section_(b); + r = quantified(b, l + 1); + if (!r) r = predicate(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + /* ********************************************************** */ + // '(' expression ')' + public static boolean paren_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "paren_expression")) return false; + if (!nextTokenIs(b, BNF_LEFT_PAREN)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_LEFT_PAREN); + p = r; // pin = 1 + r = r && report_error_(b, expression(b, l + 1)); + r = p && consumeToken(b, BNF_RIGHT_PAREN) && r; + exit_section_(b, l, m, BNF_PAREN_EXPRESSION, r, p, null); + return r || p; + } + + /* ********************************************************** */ + // predicate_sign simple + public static boolean predicate(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "predicate")) return false; + if (!nextTokenIs(b, "", BNF_OP_NOT, BNF_OP_AND)) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = predicate_sign(b, l + 1); + r = r && simple(b, l + 1); + exit_section_(b, l, m, BNF_PREDICATE, r, false, null); + return r; + } + + /* ********************************************************** */ + // '&' | '!' 
+ public static boolean predicate_sign(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "predicate_sign")) return false; + if (!nextTokenIs(b, "", BNF_OP_NOT, BNF_OP_AND)) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_OP_AND); + if (!r) r = consumeToken(b, BNF_OP_NOT); + exit_section_(b, l, m, BNF_PREDICATE_SIGN, r, false, null); + return r; + } + + /* ********************************************************** */ + // '[' expression ']' | simple quantifier? + public static boolean quantified(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = quantified_0(b, l + 1); + if (!r) r = quantified_1(b, l + 1); + exit_section_(b, l, m, BNF_QUANTIFIED, r, false, null); + return r; + } + + // '[' expression ']' + private static boolean quantified_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_BRACKET); + r = r && expression(b, l + 1); + r = r && consumeToken(b, BNF_RIGHT_BRACKET); + exit_section_(b, m, null, r); + return r; + } + + // simple quantifier? + private static boolean quantified_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_1")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple(b, l + 1); + r = r && quantified_1_1(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // quantifier? + private static boolean quantified_1_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_1_1")) return false; + quantifier(b, l + 1); + return true; + } + + /* ********************************************************** */ + // '?' | '+' | '*' + public static boolean quantifier(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantifier")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_OP_OPT); + if (!r) r = consumeToken(b, BNF_OP_ONEMORE); + if (!r) r = consumeToken(b, BNF_OP_ZEROMORE); + exit_section_(b, l, m, BNF_QUANTIFIER, r, false, null); + return r; + } + + /* ********************************************************** */ + // id + public static boolean reference_or_token(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "reference_or_token")) return false; + if (!nextTokenIs(b, BNF_ID)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_ID); + exit_section_(b, m, BNF_REFERENCE_OR_TOKEN, r); + return r; + } + + /* ********************************************************** */ + // modifier* id '::=' expression attrs? ';'? + public static boolean rule(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule")) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, ""); + r = rule_0(b, l + 1); + r = r && consumeToken(b, BNF_ID); + r = r && consumeToken(b, BNF_OP_IS); + p = r; // pin = 3 + r = r && report_error_(b, expression(b, l + 1)); + r = p && report_error_(b, rule_4(b, l + 1)) && r; + r = p && rule_5(b, l + 1) && r; + exit_section_(b, l, m, BNF_RULE, r, p, rule_recover_until_parser_); + return r || p; + } + + // modifier* + private static boolean rule_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_0")) return false; + int c = current_position_(b); + while (true) { + if (!modifier(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "rule_0", c)) break; + c = current_position_(b); + } + return true; + } + + // attrs? 
+ private static boolean rule_4(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_4")) return false; + attrs(b, l + 1); + return true; + } + + // ';'? + private static boolean rule_5(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_5")) return false; + consumeToken(b, BNF_SEMICOLON); + return true; + } + + /* ********************************************************** */ + // !'{' + static boolean rule_recover_until(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_recover_until")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !consumeToken(b, BNF_LEFT_BRACE); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // option + + public static boolean sequence(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "sequence")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = option(b, l + 1); + int c = current_position_(b); + while (r) { + if (!option(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "sequence", c)) break; + c = current_position_(b); + } + exit_section_(b, l, m, BNF_SEQUENCE, r, false, null); + return r; + } + + /* ********************************************************** */ + // !(modifier* id '::=' ) reference_or_token | literal_expression | paren_expression + static boolean simple(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0(b, l + 1); + if (!r) r = literal_expression(b, l + 1); + if (!r) r = paren_expression(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !(modifier* id '::=' ) reference_or_token + private static boolean simple_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0_0(b, l + 1); + r = r && reference_or_token(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !(modifier* id '::=' ) + private static boolean simple_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !simple_0_0_0(b, l + 1); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + // modifier* id '::=' + private static boolean simple_0_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0_0_0_0(b, l + 1); + r = r && consumeToken(b, BNF_ID); + r = r && consumeToken(b, BNF_OP_IS); + exit_section_(b, m, null, r); + return r; + } + + // modifier* + private static boolean simple_0_0_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0_0_0")) return false; + int c = current_position_(b); + while (true) { + if (!modifier(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "simple_0_0_0_0", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // string + public static boolean string_literal_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "string_literal_expression")) return false; + if (!nextTokenIs(b, BNF_STRING)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_STRING); + exit_section_(b, m, BNF_STRING_LITERAL_EXPRESSION, r); + return r; + } + + final static Parser attr_recover_until_parser_ = new Parser() { + public boolean parse(PsiBuilder b, int l) { + 
return attr_recover_until(b, l + 1); + } + }; + final static Parser rule_recover_until_parser_ = new Parser() { + public boolean parse(PsiBuilder b, int l) { + return rule_recover_until(b, l + 1); + } + }; +} diff --git a/samples/Java/JFlexLexer.java b/samples/Java/JFlexLexer.java new file mode 100644 index 00000000..e54564fb --- /dev/null +++ b/samples/Java/JFlexLexer.java @@ -0,0 +1,482 @@ +/* The following code was generated by JFlex 1.4.3 on 28/01/16 11:27 */ + +package test; +import com.intellij.lexer.*; +import com.intellij.psi.tree.IElementType; +import static org.intellij.grammar.psi.BnfTypes.*; + + +/** + * This class is a scanner generated by + * JFlex 1.4.3 + * on 28/01/16 11:27 from the specification file + * /home/abigail/code/intellij-grammar-kit-test/src/test/_GrammarLexer.flex + */ +public class _GrammarLexer implements FlexLexer { + /** initial size of the lookahead buffer */ + private static final int ZZ_BUFFERSIZE = 16384; + + /** lexical states */ + public static final int YYINITIAL = 0; + + /** + * ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l + * ZZ_LEXSTATE[l+1] is the state in the DFA for the lexical state l + * at the beginning of a line + * l is of the form l = 2*k, k a non negative integer + */ + private static final int ZZ_LEXSTATE[] = { + 0, 0 + }; + + /** + * Translates characters to character classes + */ + private static final String ZZ_CMAP_PACKED = + "\11\0\1\1\1\1\1\0\1\1\1\1\22\0\1\1\101\0\1\13"+ + "\1\0\1\3\1\14\1\0\1\10\1\0\1\2\3\0\1\12\1\7"+ + "\3\0\1\6\1\4\1\5\1\11\uff8a\0"; + + /** + * Translates characters to character classes + */ + private static final char [] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED); + + /** + * Translates DFA states to action switch labels. + */ + private static final int [] ZZ_ACTION = zzUnpackAction(); + + private static final String ZZ_ACTION_PACKED_0 = + "\1\0\1\1\1\2\3\1\1\3\10\0\1\4\1\5"; + + private static int [] zzUnpackAction() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackAction(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + do result[j++] = value; while (--count > 0); + } + return j; + } + + + /** + * Translates a state to a row index in the transition table + */ + private static final int [] ZZ_ROWMAP = zzUnpackRowMap(); + + private static final String ZZ_ROWMAP_PACKED_0 = + "\0\0\0\15\0\32\0\47\0\64\0\101\0\15\0\116"+ + "\0\133\0\150\0\165\0\202\0\217\0\234\0\251\0\15"+ + "\0\15"; + + private static int [] zzUnpackRowMap() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackRowMap(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int high = packed.charAt(i++) << 16; + result[j++] = high | packed.charAt(i++); + } + return j; + } + + /** + * The transition table of the DFA + */ + private static final int [] ZZ_TRANS = zzUnpackTrans(); + + private static final String ZZ_TRANS_PACKED_0 = + "\1\2\1\3\1\4\1\2\1\5\2\2\1\6\5\2"+ + "\16\0\1\3\16\0\1\7\16\0\1\10\20\0\1\11"+ + "\11\0\1\12\20\0\1\13\4\0\1\14\25\0\1\15"+ + 
"\10\0\1\16\21\0\1\17\10\0\1\20\12\0\1\21"+ + "\6\0"; + + private static int [] zzUnpackTrans() { + int [] result = new int[182]; + int offset = 0; + offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackTrans(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + value--; + do result[j++] = value; while (--count > 0); + } + return j; + } + + + /* error codes */ + private static final int ZZ_UNKNOWN_ERROR = 0; + private static final int ZZ_NO_MATCH = 1; + private static final int ZZ_PUSHBACK_2BIG = 2; + private static final char[] EMPTY_BUFFER = new char[0]; + private static final int YYEOF = -1; + private static java.io.Reader zzReader = null; // Fake + + /* error messages for the codes above */ + private static final String ZZ_ERROR_MSG[] = { + "Unkown internal scanner error", + "Error: could not match input", + "Error: pushback value was too large" + }; + + /** + * ZZ_ATTRIBUTE[aState] contains the attributes of state aState + */ + private static final int [] ZZ_ATTRIBUTE = zzUnpackAttribute(); + + private static final String ZZ_ATTRIBUTE_PACKED_0 = + "\1\0\1\11\4\1\1\11\10\0\2\11"; + + private static int [] zzUnpackAttribute() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackAttribute(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + do result[j++] = value; while (--count > 0); + } + return j; + } + + /** the current state of the DFA */ + private int zzState; + + /** the current lexical state */ + private int zzLexicalState = YYINITIAL; + + /** this buffer contains the current text to be matched and is + the source of the yytext() string */ + private CharSequence zzBuffer = ""; + + /** this buffer may contains the current text array to be matched when it is cheap to acquire it */ + private char[] zzBufferArray; + + /** the textposition at the last accepting state */ + private int zzMarkedPos; + + /** the textposition at the last state to be included in yytext */ + private int zzPushbackPos; + + /** the current text position in the buffer */ + private int zzCurrentPos; + + /** startRead marks the beginning of the yytext() string in the buffer */ + private int zzStartRead; + + /** endRead marks the last character in the buffer, that has been read + from input */ + private int zzEndRead; + + /** + * zzAtBOL == true <=> the scanner is currently at the beginning of a line + */ + private boolean zzAtBOL = true; + + /** zzAtEOF == true <=> the scanner is at the EOF */ + private boolean zzAtEOF; + + /* user code: */ + public _GrammarLexer() { + this((java.io.Reader)null); + } + + + /** + * Creates a new scanner + * + * @param in the java.io.Reader to read input from. + */ + public _GrammarLexer(java.io.Reader in) { + this.zzReader = in; + } + + + /** + * Unpacks the compressed character translation table. 
+ * + * @param packed the packed character translation table + * @return the unpacked character translation table + */ + private static char [] zzUnpackCMap(String packed) { + char [] map = new char[0x10000]; + int i = 0; /* index in packed string */ + int j = 0; /* index in unpacked array */ + while (i < 52) { + int count = packed.charAt(i++); + char value = packed.charAt(i++); + do map[j++] = value; while (--count > 0); + } + return map; + } + + public final int getTokenStart(){ + return zzStartRead; + } + + public final int getTokenEnd(){ + return getTokenStart() + yylength(); + } + + public void reset(CharSequence buffer, int start, int end,int initialState){ + zzBuffer = buffer; + zzBufferArray = com.intellij.util.text.CharArrayUtil.fromSequenceWithoutCopying(buffer); + zzCurrentPos = zzMarkedPos = zzStartRead = start; + zzPushbackPos = 0; + zzAtEOF = false; + zzAtBOL = true; + zzEndRead = end; + yybegin(initialState); + } + + /** + * Refills the input buffer. + * + * @return false, iff there was new input. + * + * @exception java.io.IOException if any I/O-Error occurs + */ + private boolean zzRefill() throws java.io.IOException { + return true; + } + + + /** + * Returns the current lexical state. + */ + public final int yystate() { + return zzLexicalState; + } + + + /** + * Enters a new lexical state + * + * @param newState the new lexical state + */ + public final void yybegin(int newState) { + zzLexicalState = newState; + } + + + /** + * Returns the text matched by the current regular expression. + */ + public final CharSequence yytext() { + return zzBuffer.subSequence(zzStartRead, zzMarkedPos); + } + + + /** + * Returns the character at position pos from the + * matched text. + * + * It is equivalent to yytext().charAt(pos), but faster + * + * @param pos the position of the character to fetch. + * A value from 0 to yylength()-1. + * + * @return the character at position pos + */ + public final char yycharat(int pos) { + return zzBufferArray != null ? zzBufferArray[zzStartRead+pos]:zzBuffer.charAt(zzStartRead+pos); + } + + + /** + * Returns the length of the matched text region. + */ + public final int yylength() { + return zzMarkedPos-zzStartRead; + } + + + /** + * Reports an error that occured while scanning. + * + * In a wellformed scanner (no or only correct usage of + * yypushback(int) and a match-all fallback rule) this method + * will only be called with things that "Can't Possibly Happen". + * If this method is called, something is seriously wrong + * (e.g. a JFlex bug producing a faulty scanner etc.). + * + * Usual syntax/scanner level error handling should be done + * in error fallback rules. + * + * @param errorCode the code of the errormessage to display + */ + private void zzScanError(int errorCode) { + String message; + try { + message = ZZ_ERROR_MSG[errorCode]; + } + catch (ArrayIndexOutOfBoundsException e) { + message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR]; + } + + throw new Error(message); + } + + + /** + * Pushes the specified amount of characters back into the input stream. + * + * They will be read again by then next call of the scanning method + * + * @param number the number of characters to be read again. + * This number must not be greater than yylength()! + */ + public void yypushback(int number) { + if ( number > yylength() ) + zzScanError(ZZ_PUSHBACK_2BIG); + + zzMarkedPos -= number; + } + + + /** + * Resumes scanning until the next regular expression is matched, + * the end of input is encountered or an I/O-Error occurs. 
+ * + * @return the next token + * @exception java.io.IOException if any I/O-Error occurs + */ + public IElementType advance() throws java.io.IOException { + int zzInput; + int zzAction; + + // cached fields: + int zzCurrentPosL; + int zzMarkedPosL; + int zzEndReadL = zzEndRead; + CharSequence zzBufferL = zzBuffer; + char[] zzBufferArrayL = zzBufferArray; + char [] zzCMapL = ZZ_CMAP; + + int [] zzTransL = ZZ_TRANS; + int [] zzRowMapL = ZZ_ROWMAP; + int [] zzAttrL = ZZ_ATTRIBUTE; + + while (true) { + zzMarkedPosL = zzMarkedPos; + + zzAction = -1; + + zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL; + + zzState = ZZ_LEXSTATE[zzLexicalState]; + + + zzForAction: { + while (true) { + + if (zzCurrentPosL < zzEndReadL) + zzInput = (zzBufferArrayL != null ? zzBufferArrayL[zzCurrentPosL++] : zzBufferL.charAt(zzCurrentPosL++)); + else if (zzAtEOF) { + zzInput = YYEOF; + break zzForAction; + } + else { + // store back cached positions + zzCurrentPos = zzCurrentPosL; + zzMarkedPos = zzMarkedPosL; + boolean eof = zzRefill(); + // get translated positions and possibly new buffer + zzCurrentPosL = zzCurrentPos; + zzMarkedPosL = zzMarkedPos; + zzBufferL = zzBuffer; + zzEndReadL = zzEndRead; + if (eof) { + zzInput = YYEOF; + break zzForAction; + } + else { + zzInput = (zzBufferArrayL != null ? zzBufferArrayL[zzCurrentPosL++] : zzBufferL.charAt(zzCurrentPosL++)); + } + } + int zzNext = zzTransL[ zzRowMapL[zzState] + zzCMapL[zzInput] ]; + if (zzNext == -1) break zzForAction; + zzState = zzNext; + + int zzAttributes = zzAttrL[zzState]; + if ( (zzAttributes & 1) == 1 ) { + zzAction = zzState; + zzMarkedPosL = zzCurrentPosL; + if ( (zzAttributes & 8) == 8 ) break zzForAction; + } + + } + } + + // store back cached position + zzMarkedPos = zzMarkedPosL; + + switch (zzAction < 0 ? 
zzAction : ZZ_ACTION[zzAction]) {
+        case 1:
+          { return com.intellij.psi.TokenType.BAD_CHARACTER;
+          }
+        case 6: break;
+        case 4:
+          { return BNF_STRING;
+          }
+        case 7: break;
+        case 5:
+          { return BNF_NUMBER;
+          }
+        case 8: break;
+        case 3:
+          { return BNF_ID;
+          }
+        case 9: break;
+        case 2:
+          { return com.intellij.psi.TokenType.WHITE_SPACE;
+          }
+        case 10: break;
+        default:
+          if (zzInput == YYEOF && zzStartRead == zzCurrentPos) {
+            zzAtEOF = true;
+            return null;
+          }
+          else {
+            zzScanError(ZZ_NO_MATCH);
+          }
+      }
+    }
+  }
+
+
+}
diff --git a/samples/Mathematica/TestArithmetic.mt b/samples/Mathematica/TestArithmetic.mt
new file mode 100644
index 00000000..40cbe122
--- /dev/null
+++ b/samples/Mathematica/TestArithmetic.mt
@@ -0,0 +1 @@
+Test[1 + 2, 3, TestID -> "One plus two"]
diff --git a/samples/Mathematica/TestString.mt b/samples/Mathematica/TestString.mt
new file mode 100644
index 00000000..20539407
--- /dev/null
+++ b/samples/Mathematica/TestString.mt
@@ -0,0 +1 @@
+Test["a" <> "b", "ab", TestID -> "Concat \"a\" and \"b\""]
diff --git a/samples/Mathematica/TestSuite.mt b/samples/Mathematica/TestSuite.mt
new file mode 100644
index 00000000..b18ae062
--- /dev/null
+++ b/samples/Mathematica/TestSuite.mt
@@ -0,0 +1,5 @@
+TestSuite[
+  { "TestArithmetic.mt"
+  , "TestString.mt"
+  }
+]
diff --git a/samples/MediaWiki/README.wiki b/samples/MediaWiki/README.wiki
new file mode 100644
index 00000000..546c3290
--- /dev/null
+++ b/samples/MediaWiki/README.wiki
@@ -0,0 +1,694 @@
+= Name =
+
+'''nginx_tcp_proxy_module''' - support TCP proxy with Nginx
+
+= Installation =
+
+Download the latest stable version of the release tarball of this module from [http://github.com/yaoweibin/nginx_tcp_proxy_module github].
+
+Grab the nginx source code from [http://nginx.org/ nginx.org], for example, the version 1.2.1 (see nginx compatibility), and then build the source with this module:
+
+
+    $ wget 'http://nginx.org/download/nginx-1.2.1.tar.gz'
+    $ tar -xzvf nginx-1.2.1.tar.gz
+    $ cd nginx-1.2.1/
+    $ patch -p1 < /path/to/nginx_tcp_proxy_module/tcp.patch
+
+    $ ./configure --add-module=/path/to/nginx_tcp_proxy_module
+
+    $ make
+    $ make install
+
+
+
+= Synopsis =
+
+
+http {
+
+    server {
+        listen 80;
+
+        location /status {
+            tcp_check_status;
+        }
+    }
+}
+
+
+
+
+#You can also include the tcp_proxy.conf file individually
+
+#include /path/to/tcp_proxy.conf;
+
+tcp {
+
+    upstream cluster {
+        # simple round-robin
+        server 192.168.0.1:80;
+        server 192.168.0.2:80;
+
+        check interval=3000 rise=2 fall=5 timeout=1000;
+
+        #check interval=3000 rise=2 fall=5 timeout=1000 type=ssl_hello;
+
+        #check interval=3000 rise=2 fall=5 timeout=1000 type=http;
+        #check_http_send "GET / HTTP/1.0\r\n\r\n";
+        #check_http_expect_alive http_2xx http_3xx;
+    }
+
+    server {
+        listen 8888;
+
+        proxy_pass cluster;
+    }
+}
+
+
+= Description =
+
+This module actually includes many modules: ngx_tcp_module, ngx_tcp_core_module, ngx_tcp_upstream_module, ngx_tcp_proxy_module, ngx_tcp_websocket_module, ngx_tcp_ssl_module, and ngx_tcp_upstream_ip_hash_module. All these modules work together to support TCP proxying with Nginx. I also added other features: ip_hash, upstream server health checks, and a status monitor.
+
+The motivation for writing these modules is Nginx's high performance and robustness. At first, I developed this module just for general TCP proxying. Now, this module is frequently used for websocket reverse proxying.
+
+Note: you can't use the same listening port as the HTTP modules.
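+
+For example, a minimal websocket reverse proxying setup might look like the following sketch (the listening port and backend address are placeholders; the websocket_pass directive is documented under Directives below):
+
+
+tcp {
+
+    server {
+        listen 8080;
+
+        websocket_pass 192.168.0.1:8000;
+    }
+}
+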
+
+= Directives =
+
+== ngx_tcp_module ==
+
+=== tcp ===
+
+'''syntax:''' ''tcp {...}''
+
+'''default:''' ''none''
+
+'''context:''' ''main''
+
+'''description:''' All the tcp-related directives are contained in the tcp block.
+
+
+== ngx_tcp_core_module ==
+
+=== server ===
+
+'''syntax:''' ''server {...}''
+
+'''default:''' ''none''
+
+'''context:''' ''tcp''
+
+'''description:''' All the specific server directives are contained in the server block.
+
+=== listen ===
+
+'''syntax:''' ''listen address:port [ bind | ssl | default]''
+
+'''default:''' ''none''
+
+'''context:''' ''server''
+
+'''description:''' The same as [http://wiki.nginx.org/NginxMailCoreModule#listen listen]. The default parameter designates the default server when several server blocks share the same port.
+
+=== access_log ===
+
+'''syntax:''' ''access_log path [buffer=size] | off''
+
+'''default:''' ''access_log logs/tcp_access.log''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the access log. Each record's format is like this:
+
+
+log_time worker_process_pid client_ip host_ip accept_time upstream_ip bytes_read bytes_write
+
+2011/08/02 06:19:07 [5972] 127.0.0.1 0.0.0.0:1982 2011/08/02 06:18:19 172.19.0.129:80 80 236305
+
+
+
+* ''log_time'': The current time when writing this log. The log action is called when the proxy session is closed.
+* ''worker_process_pid'': the pid of the worker process
+* ''client_ip'': the client ip
+* ''host_ip'': the server ip and port
+* ''accept_time'': the time when the server accepted the client's connection
+* ''upstream_ip'': the upstream server's ip
+* ''bytes_read'': the bytes read from the client
+* ''bytes_write'': the bytes written to the client
+
+=== allow ===
+
+'''syntax:''' ''allow [ address | CIDR | all ]''
+
+'''default:''' ''none''
+
+'''context:''' ''server''
+
+'''description:''' The directive grants access to the networks or addresses indicated.
+
+=== deny ===
+
+'''syntax:''' ''deny [ address | CIDR | all ]''
+
+'''default:''' ''none''
+
+'''context:''' ''server''
+
+'''description:''' The directive denies access to the networks or addresses indicated.
+
+=== so_keepalive ===
+
+'''syntax:''' ''so_keepalive on|off''
+
+'''default:''' ''off''
+
+'''context:''' ''main, server''
+
+'''description:''' The same as [http://wiki.nginx.org/NginxMailCoreModule#so_keepalive so_keepalive].
+
+=== tcp_nodelay ===
+
+'''syntax:''' ''tcp_nodelay on|off''
+
+'''default:''' ''on''
+
+'''context:''' ''main, server''
+
+'''description:''' The same as [http://wiki.nginx.org/NginxHttpCoreModule#tcp_nodelay tcp_nodelay].
+
+=== timeout ===
+
+'''syntax:''' ''timeout milliseconds''
+
+'''default:''' ''60000''
+
+'''context:''' ''main, server''
+
+'''description:''' Set the timeout value for client connections.
+
+=== server_name ===
+
+'''syntax:''' ''server_name name''
+
+'''default:''' ''The name of the host, obtained through gethostname()''
+
+'''context:''' ''tcp, server''
+
+'''description:''' The same as [http://wiki.nginx.org/NginxMailCoreModule#server_name server_name]. You can specify several server names in different server blocks with the same port. They can be used by the websocket module.
+
+=== resolver ===
+
+'''syntax:''' ''resolver address''
+
+'''default:''' ''none''
+
+'''context:''' ''tcp, server''
+
+'''description:''' The DNS server address.
+
+=== resolver_timeout ===
+
+'''syntax:''' ''resolver_timeout time''
+
+'''default:''' ''30s''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Resolver timeout in seconds.
+
+
+== ngx_tcp_upstream_module ==
+
+=== upstream ===
+
+'''syntax:''' ''upstream {...}''
+
+'''default:''' ''none''
+
+'''context:''' ''tcp''
+
+'''description:''' All the upstream directives are contained in this block. Upstream servers are dispatched round-robin by default.
+
+=== server ===
+
+'''syntax:''' ''server name [parameters]''
+
+'''default:''' ''none''
+
+'''context:''' ''upstream''
+
+'''description:''' Most of the parameters are the same as [http://wiki.nginx.org/NginxHttpUpstreamModule#server server]. The default port is 80.
+
+=== check ===
+
+'''syntax:''' ''check interval=milliseconds [fall=count] [rise=count] [timeout=milliseconds] [type=tcp|ssl_hello|smtp|mysql|pop3|imap]''
+
+'''default:''' ''none; if parameters are omitted, the defaults are interval=30000 fall=5 rise=2 timeout=1000''
+
+'''context:''' ''upstream''
+
+'''description:''' Add health checking for the upstream servers. At present, the check method is a simple tcp connect.
+
+The parameters' meanings are:
+
+* ''interval'': the check request's interval time.
+* ''fall''(fall_count): After fall_count check failures, the server is marked down.
+* ''rise''(rise_count): After rise_count check successes, the server is marked up.
+* ''timeout'': the check request's timeout.
+* ''type'': the check protocol type:
+# ''tcp'' is a simple tcp socket connect that peeks one byte.
+# ''ssl_hello'' sends a client ssl hello packet and receives the server ssl hello packet.
+# ''http'' sends an http request packet, then receives and parses the http response to diagnose if the upstream server is alive.
+# ''smtp'' sends an smtp request packet, then receives and parses the smtp response to diagnose if the upstream server is alive. A response beginning with '2' is considered an OK response.
+# ''mysql'' connects to the mysql server and receives the greeting response to diagnose if the upstream server is alive.
+# ''pop3'' receives and parses the pop3 response to diagnose if the upstream server is alive. A response beginning with '+' is considered an OK response.
+# ''imap'' connects to the imap server and receives the greeting response to diagnose if the upstream server is alive.
+
+=== check_http_send ===
+
+'''syntax:''' ''check_http_send http_packet''
+
+'''default:''' ''"GET / HTTP/1.0\r\n\r\n"''
+
+'''context:''' ''upstream''
+
+'''description:''' If you set the check type to http, the check function will send this http packet to check the upstream server.
+
+=== check_http_expect_alive ===
+
+'''syntax:''' ''check_http_expect_alive [ http_2xx | http_3xx | http_4xx | http_5xx ]''
+
+'''default:''' ''http_2xx | http_3xx''
+
+'''context:''' ''upstream''
+
+'''description:''' These status codes indicate that the upstream server's http response is OK and the backend is alive.
+
+=== check_smtp_send ===
+
+'''syntax:''' ''check_smtp_send smtp_packet''
+
+'''default:''' ''"HELO smtp.localdomain\r\n"''
+
+'''context:''' ''upstream''
+
+'''description:''' If you set the check type to smtp, the check function will send this smtp packet to check the upstream server.
+
+=== check_smtp_expect_alive ===
+
+'''syntax:''' ''check_smtp_expect_alive [smtp_2xx | smtp_3xx | smtp_4xx | smtp_5xx]''
+
+'''default:''' ''smtp_2xx''
+
+'''context:''' ''upstream''
+
+'''description:''' These status codes indicate that the upstream server's smtp response is OK and the backend is alive.
+
+=== check_shm_size ===
+
+'''syntax:''' ''check_shm_size size''
+
+'''default:''' ''(number_of_checked_upstream_blocks + 1) * pagesize''
+
+'''context:''' ''tcp''
+
+'''description:''' If you store hundreds of servers in one upstream block, the shared memory for health checking may not be enough; you can enlarge it with this directive.
+
+=== tcp_check_status ===
+
+'''syntax:''' ''tcp_check_status''
+
+'''default:''' ''none''
+
+'''context:''' ''location''
+
+'''description:''' Display the health check status of the servers via HTTP. This directive is set in the http block.
+
+The table field meanings are:
+
+* ''Index'': The server index in the check table
+* ''Name'': The upstream server name
+* ''Status'': The marked status of the server
+* ''Busyness'': The number of connections currently established to the server
+* ''Rise counts'': The count of successful checks
+* ''Fall counts'': The count of unsuccessful checks
+* ''Access counts'': The number of times this server has been accessed
+* ''Check type'': The type of the check packet
+
+
+== ngx_tcp_upstream_busyness_module ==
+
+=== busyness ===
+
+'''syntax:''' ''busyness''
+
+'''default:''' ''none''
+
+'''context:''' ''upstream''
+
+'''description:''' Upstream servers will be dispatched by the backend servers' busyness.
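+
+For example, an upstream block dispatched by busyness might look like this sketch (the server addresses and listening port are placeholders, following the Synopsis above):
+
+
+tcp {
+
+    upstream cluster {
+        busyness;
+
+        server 192.168.0.1:80;
+        server 192.168.0.2:80;
+    }
+
+    server {
+        listen 8888;
+
+        proxy_pass cluster;
+    }
+}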
+
+
+== ngx_tcp_upstream_ip_hash_module ==
+
+=== ip_hash ===
+
+'''syntax:''' ''ip_hash''
+
+'''default:''' ''none''
+
+'''context:''' ''upstream''
+
+'''description:''' Upstream servers will be dispatched by ip_hash.
+
+
+== ngx_tcp_proxy_module ==
+
+=== proxy_pass ===
+
+'''syntax:''' ''proxy_pass host:port''
+
+'''default:''' ''none''
+
+'''context:''' ''server''
+
+'''description:''' Proxy the request to the backend server. The default port is 80.
+
+=== proxy_buffer ===
+
+'''syntax:''' ''proxy_buffer size''
+
+'''default:''' ''4k''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the size of the proxy buffer.
+
+=== proxy_connect_timeout ===
+
+'''syntax:''' ''proxy_connect_timeout milliseconds''
+
+'''default:''' ''60000''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the timeout value for connecting to backends.
+
+=== proxy_read_timeout ===
+
+'''syntax:''' ''proxy_read_timeout milliseconds''
+
+'''default:''' ''60000''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the timeout value for reading from backends.
+
+=== proxy_send_timeout ===
+
+'''syntax:''' ''proxy_send_timeout milliseconds''
+
+'''default:''' ''60000''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the timeout value for sending to backends.
+
+
+== ngx_tcp_websocket_module ==
+
+=== websocket_pass ===
+
+'''syntax:''' ''websocket_pass [path] host:port''
+
+'''default:''' ''none''
+
+'''context:''' ''server''
+
+'''description:''' Proxy the websocket request to the backend server. The default port is 80. You can specify several different paths in the same server block.
+
+=== websocket_buffer ===
+
+'''syntax:''' ''websocket_buffer size''
+
+'''default:''' ''4k''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the size of the proxy buffer.
+
+=== websocket_connect_timeout ===
+
+'''syntax:''' ''websocket_connect_timeout milliseconds''
+
+'''default:''' ''60000''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the timeout value for connecting to backends.
+
+=== websocket_read_timeout ===
+
+'''syntax:''' ''websocket_read_timeout milliseconds''
+
+'''default:''' ''60000''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the timeout value for reading from backends. The effective timeout will be the minimum of this and the ''timeout'' directive, so if you want a long timeout for your websockets, make sure to set both parameters.
+
+=== websocket_send_timeout ===
+
+'''syntax:''' ''websocket_send_timeout milliseconds''
+
+'''default:''' ''60000''
+
+'''context:''' ''tcp, server''
+
+'''description:''' Set the timeout value for sending to backends.
+
+
+== ngx_tcp_ssl_module ==
+
+The default config file includes this ngx_tcp_ssl_module. If you want to compile nginx without ngx_tcp_ssl_module, copy ngx_tcp_proxy_module/config_without_ssl to ngx_tcp_proxy_module/config, then reconfigure and compile nginx.
+
+=== ssl ===
+
+'''syntax:''' ''ssl [on|off]''
+
+'''default:''' ''ssl off''
+
+'''context:''' ''tcp, server''
+
+Enables SSL for a server.
+
+=== ssl_certificate ===
+
+'''syntax:''' ''ssl_certificate file''
+
+'''default:''' ''ssl_certificate cert.pem''
+
+'''context:''' ''tcp, server''
+
+This directive specifies the file containing the certificate, in PEM format. This file can also contain other certificates and the server private key.
+
+=== ssl_certificate_key ===
+
+'''syntax:''' ''ssl_certificate_key file''
+
+'''default:''' ''ssl_certificate_key cert.pem''
+
+'''context:''' ''tcp, server''
+
+This directive specifies the file containing the private key, in PEM format.
+
+=== ssl_client_certificate ===
+
+'''syntax:''' ''ssl_client_certificate file''
+
+'''default:''' ''none''
+
+'''context:''' ''tcp, server''
+
+This directive specifies the file containing the CA (root) certificate, in PEM format, that is used for validating client certificates.
+
+=== ssl_dhparam ===
+
+'''syntax:''' ''ssl_dhparam file''
+
+'''default:''' ''none''
+
+'''context:''' ''tcp, server''
+
+This directive specifies a file containing Diffie-Hellman key agreement protocol cryptographic parameters, in PEM format, utilized for exchanging session keys between server and client.
+
+=== ssl_ciphers ===
+
+'''syntax:''' ''ssl_ciphers openssl_cipherlist_spec''
+
+'''default:''' ''ssl_ciphers HIGH:!aNULL:!MD5''
+
+'''context:''' ''tcp, server''
+
+This directive describes the list of cipher suites the server supports for establishing a secure connection. Cipher suites are specified in the [http://openssl.org/docs/apps/ciphers.html OpenSSL] cipherlist format, for example:
+
+
+ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;
+
+
+The complete cipherlist supported by the currently installed version of OpenSSL on your platform can be obtained by issuing the command:
+openssl ciphers
+
+
+=== ssl_crl ===
+
+'''syntax:''' ''ssl_crl file''
+
+'''default:''' ''none''
+
+'''context:''' ''tcp, server''
+
+This directive specifies the filename of a Certificate Revocation List, in PEM format, which is used to check the revocation status of certificates.
+
+=== ssl_prefer_server_ciphers ===
+
+'''syntax:''' ''ssl_prefer_server_ciphers [on|off]''
+
+'''default:''' ''ssl_prefer_server_ciphers off''
+
+'''context:''' ''tcp, server''
+
+The server requires that the cipher suite list for protocols SSLv3 and TLSv1 be preferred over the client-supplied cipher suite list.
+
+=== ssl_protocols ===
+
+'''syntax:''' ''ssl_protocols [SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2]''
+
+'''default:''' ''ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2''
+
+'''context:''' ''tcp, server''
+
+This directive enables the protocol versions specified.
+
+=== ssl_verify_client ===
+
+'''syntax:''' ''ssl_verify_client on|off|optional''
+
+'''default:''' ''ssl_verify_client off''
+
+'''context:''' ''tcp, server''
+
+This directive enables the verification of the client identity. The 'optional' parameter checks the client identity using its certificate in case it was made available to the server.
+
+=== ssl_verify_depth ===
+
+'''syntax:''' ''ssl_verify_depth number''
+
+'''default:''' ''ssl_verify_depth 1''
+
+'''context:''' ''tcp, server''
+
+This directive sets how deep the server should go in the client-provided certificate chain in order to verify the client identity.
+
+=== ssl_session_cache ===
+
+'''syntax:''' ''ssl_session_cache off|none|builtin:size and/or shared:name:size''
+
+'''default:''' ''ssl_session_cache off''
+
+'''context:''' ''tcp, server''
+
+The directive sets the types and sizes of caches to store the SSL sessions.
+
+The cache types are:
+
+* off -- Hard off: nginx explicitly tells the client that sessions cannot be reused.
+* none -- Soft off: nginx tells the client that sessions can be reused, but nginx actually never reuses them. This is a workaround for some mail clients, as ssl_session_cache may be used in a mail proxy as well as in an HTTP server.
+* builtin -- the OpenSSL builtin cache, used inside one worker process only. The cache size is assigned as a number of sessions. Note: there appears to be a memory fragmentation issue using this method; please take that into consideration when using it. See "References" below.
+* shared -- the cache is shared between all worker processes. The size of the cache is assigned in bytes: a 1 MB cache can contain roughly 4000 sessions. Each shared cache must be given an arbitrary name. A shared cache with a given name can be used in several virtual hosts.
+It's possible to use both types of cache — builtin and shared — simultaneously, for example:
+
+
+ssl_session_cache builtin:1000 shared:SSL:10m;
+
+
+Bear in mind, however, that using only the shared cache, i.e. without the builtin one, should be more effective.
+
+=== ssl_session_timeout ===
+
+'''syntax:''' ''ssl_session_timeout time''
+
+'''default:''' ''ssl_session_timeout 5m''
+
+'''context:''' ''tcp, server''
+
+This directive defines the maximum time during which the client can re-use the previously negotiated cryptographic parameters of the secure session that is stored in the SSL cache.
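+
+Putting the SSL directives together, a TLS-enabled tcp server might be configured like this sketch (the port and certificate paths are placeholders; cluster is the upstream from the Synopsis):
+
+
+tcp {
+
+    server {
+        listen 8443;
+
+        ssl on;
+        ssl_certificate /path/to/cert.pem;
+        ssl_certificate_key /path/to/cert.pem;
+        ssl_session_cache shared:SSL:10m;
+
+        proxy_pass cluster;
+    }
+}
+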
+
+= Compatibility =
+
+* My test bed is 0.7.65+
+
+= Notes =
+
+The http_response_parse.rl and smtp_response_parse.rl are [http://www.complang.org/ragel/ ragel] scripts; you can edit the scripts and compile them like this:
+
+
+    $ ragel -G2 http_response_parse.rl
+    $ ragel -G2 smtp_response_parse.rl
+
+
+= TODO =
+
+* refactor this module to make it more extensible for adding third-party modules
+* manipulate headers like the http module's proxy_set_header
+* built-in variable support
+* custom log format
+* syslog support
+* FTP/IRC proxying
+
+= Known Issues =
+
+* This module can't use the same listening port as the HTTP module.
+
+= Changelogs =
+
+== v0.2.0 ==
+
+* add ssl proxy module
+* add websocket proxy module
+* add upstream busyness module
+* add tcp access log module
+
+== v0.19 ==
+
+* add many check methods
+
+== v0.1 ==
+
+* first release
+
+= Authors =
+
+Weibin Yao (姚伟斌) ''yaoweibin at gmail dot com''
+
+= Copyright & License =
+
+This README template is copied from [http://github.com/agentzh agentzh].
+
+I borrowed a lot of code from the upstream and mail modules of the nginx 0.7.* core. This part of the code is copyrighted by Igor Sysoev. The health check part borrows its design from Jack Lindamood's healthcheck module [http://github.com/cep21/healthcheck_nginx_upstreams healthcheck_nginx_upstreams].
+
+This module is licensed under the BSD license.
+
+Copyright (C) 2013 by Weibin Yao <yaoweibin@gmail.com>.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/samples/Perl/Sample.pod b/samples/Perl/Sample.pod new file mode 100644 index 00000000..00fa0b99 --- /dev/null +++ b/samples/Perl/Sample.pod @@ -0,0 +1,10 @@ +use strict; +use warnings; +package DZT::Sample; + +sub return_arrayref_of_values_passed { + my $invocant = shift; + return \@_; +} + +1; diff --git a/samples/Perl6/hash.t b/samples/Perl6/hash.t index 7bff849e..24ba2b50 100644 --- a/samples/Perl6/hash.t +++ b/samples/Perl6/hash.t @@ -12,7 +12,6 @@ unless EVAL 'EVAL("1", :lang)' { die unless EVAL(q/ package My::Hash; -use strict; sub new { my ($class, $ref) = @_; diff --git a/samples/Perl/PSGI.pod b/samples/Pod/PSGI.pod similarity index 100% rename from samples/Perl/PSGI.pod rename to samples/Pod/PSGI.pod diff --git a/samples/Python/closure_js_binary.bzl b/samples/Python/closure_js_binary.bzl new file mode 100644 index 00000000..c4453ffb --- /dev/null +++ b/samples/Python/closure_js_binary.bzl @@ -0,0 +1,124 @@ +# Copyright 2015 The Bazel Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Build definitions for JavaScript binaries compiled with the Closure Compiler. + +A single file is produced with the _compiled.js suffix. + +By default, the name of the entry point is assumed to be the same as that of the +build target. This behaviour may be overridden with the "main" attribute. + +The optimization level may be set with the "compilation_level" attribute. +Supported values are: unobfuscated, simple, and advanced. + +Example: + + closure_js_binary( + name = "hello", + compilation_level = "simple", + language_in = "ecmascript6", + language_out = "ecmascript3", + externs = ["//third_party/javascript/google_cast/cast.js"], + deps = [ + "@closure_library//:closure_library", + ":hello_lib", + ], + ) + +This rule will produce hello_combined.js. +""" + +_COMPILATION_LEVELS = { + "whitespace_only": [ + "--compilation_level=WHITESPACE_ONLY", + "--formatting=PRETTY_PRINT" + ], + "simple": ["--compilation_level=SIMPLE"], + "advanced": ["--compilation_level=ADVANCED"] +} + +_SUPPORTED_LANGUAGES = { + "es3": ["ES3"], + "ecmascript3": ["ECMASCRIPT3"], + "es5": ["ES5"], + "ecmascript5": ["ECMASCRIPT5"], + "es5_strict": ["ES5_STRICT"], + "ecmascript5_strict": ["ECMASCRIPT5_STRICT"], + "es6": ["ES6"], + "ecmascript6": ["ECMASCRIPT6"], + "es6_strict": ["ES6_STRICT"], + "ecmascript6_strict": ["ECMASCRIPT6_STRICT"], + "es6_typed": ["ES6_TYPED"], + "ecmascript6_typed": ["ECMASCRIPT6_TYPED"], +} + +def _impl(ctx): + externs = set(order="compile") + srcs = set(order="compile") + for dep in ctx.attr.deps: + externs += dep.transitive_js_externs + srcs += dep.transitive_js_srcs + + args = [ + "--entry_point=goog:%s" % ctx.attr.main, + "--js_output_file=%s" % ctx.outputs.out.path, + "--dependency_mode=LOOSE", + "--warning_level=VERBOSE", + ] + (["--js=%s" % src.path for src in srcs] + + ["--externs=%s" % extern.path for extern in externs]) + + # Set the compilation level. 
+ if ctx.attr.compilation_level in _COMPILATION_LEVELS: + args += _COMPILATION_LEVELS[ctx.attr.compilation_level] + else: + fail("Invalid compilation_level '%s', expected one of %s" % + (ctx.attr.compilation_level, _COMPILATION_LEVELS.keys())) + + # Set the language in. + if ctx.attr.language_in in _SUPPORTED_LANGUAGES: + args += "--language_in=" + _SUPPORTED_LANGUAGES[ctx.attr.language_in] + else: + fail("Invalid language_in '%s', expected one of %s" % + (ctx.attr.language_in, _SUPPORTED_LANGUAGES.keys())) + + # Set the language out. + if ctx.attr.language_out in _SUPPORTED_LANGUAGES: + args += "--language_out=" + _SUPPORTED_LANGUAGES[ctx.attr.language_out] + else: + fail("Invalid language_out '%s', expected one of %s" % + (ctx.attr.language_out, _SUPPORTED_LANGUAGES.keys())) + + ctx.action( + inputs=list(srcs) + list(externs), + outputs=[ctx.outputs.out], + arguments=args, + executable=ctx.executable._closure_compiler) + + return struct(files=set([ctx.outputs.out])) + +closure_js_binary = rule( + implementation=_impl, + attrs={ + "deps": attr.label_list( + allow_files=False, + providers=["transitive_js_externs", "transitive_js_srcs"]), + "main": attr.string(default="%{name}"), + "compilation_level": attr.string(default="advanced"), + "language_in": attr.string(default="ecmascript6"), + "language_out": attr.string(default="ecmascript3"), + "_closure_compiler": attr.label( + default=Label("//external:closure_compiler_"), + executable=True), + }, + outputs={"out": "%{name}_combined.js"}) diff --git a/samples/Stan/congress.stan b/samples/Stan/congress.stan new file mode 100644 index 00000000..6e7408e5 --- /dev/null +++ b/samples/Stan/congress.stan @@ -0,0 +1,14 @@ +data { + int N; + vector[N] incumbency_88; + vector[N] vote_86; + vector[N] vote_88; +} +parameters { + vector[3] beta; + real sigma; +} +model { + vote_88 ~ normal(beta[1] + beta[2] * vote_86 + + beta[3] * incumbency_88,sigma); +} diff --git a/samples/Stan/dogs.stan b/samples/Stan/dogs.stan new file mode 100644 index 00000000..ebca16d7 --- /dev/null +++ b/samples/Stan/dogs.stan @@ -0,0 +1,31 @@ +data { + int n_dogs; + int n_trials; + int y[n_dogs,n_trials]; +} +parameters { + vector[3] beta; +} +transformed parameters { + matrix[n_dogs,n_trials] n_avoid; + matrix[n_dogs,n_trials] n_shock; + matrix[n_dogs,n_trials] p; + + for (j in 1:n_dogs) { + n_avoid[j,1] <- 0; + n_shock[j,1] <- 0; + for (t in 2:n_trials) { + n_avoid[j,t] <- n_avoid[j,t-1] + 1 - y[j,t-1]; + n_shock[j,t] <- n_shock[j,t-1] + y[j,t-1]; + } + for (t in 1:n_trials) + p[j,t] <- beta[1] + beta[2] * n_avoid[j,t] + beta[3] * n_shock[j,t]; + } +} +model { + beta ~ normal(0, 100); + for (i in 1:n_dogs) { + for (j in 1:n_trials) + y[i,j] ~ bernoulli_logit(p[i,j]); + } +} diff --git a/samples/Stan/schools.stan b/samples/Stan/schools.stan new file mode 100644 index 00000000..171864a1 --- /dev/null +++ b/samples/Stan/schools.stan @@ -0,0 +1,26 @@ +data { + int N; + vector[N] y; + vector[N] sigma_y; +} +parameters { + vector[N] eta; + real mu_theta; + real sigma_eta; + real xi; +} +transformed parameters { + real sigma_theta; + vector[N] theta; + + theta <- mu_theta + xi * eta; + sigma_theta <- fabs(xi) / sigma_eta; +} +model { + mu_theta ~ normal(0, 100); + sigma_eta ~ inv_gamma(1, 1); //prior distribution can be changed to uniform + + eta ~ normal(0, sigma_eta); + xi ~ normal(0, 5); + y ~ normal(theta,sigma_y); +} diff --git a/samples/YANG/sfc-lisp-impl.yang b/samples/YANG/sfc-lisp-impl.yang new file mode 100644 index 00000000..761d9829 --- /dev/null +++ 
b/samples/YANG/sfc-lisp-impl.yang @@ -0,0 +1,55 @@ +module sfc-lisp-impl { + + yang-version 1; + namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sfc-lisp:impl"; + prefix "sfc-lisp-impl"; + + import config { prefix config; revision-date 2013-04-05; } + import rpc-context { prefix rpcx; revision-date 2013-06-17; } + import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; } + + + description + "This module contains the base YANG definitions for + sfc-lisp implementation."; + + revision "2015-04-27" { + description + "Initial revision."; + } + + // This is the definition of the service implementation as a module identity + identity sfc-lisp-impl { + base config:module-type; + + // Specifies the prefix for generated java classes. + config:java-name-prefix SfcLisp; + } + + + // Augments the 'configuration' choice node under modules/module. + augment "/config:modules/config:module/config:configuration" { + case sfc-lisp-impl { + when "/config:modules/config:module/config:type = 'sfc-lisp-impl'"; + + //wires in the data-broker service + container data-broker { + uses config:service-ref { + refine type { + mandatory false; + config:required-identity mdsal:binding-async-data-broker; + } + } + } + + container rpc-registry { + uses config:service-ref { + refine type { + mandatory true; + config:required-identity mdsal:binding-rpc-registry; + } + } + } + } + } +} \ No newline at end of file diff --git a/samples/reStructuredText/HACKING.rst.txt b/samples/reStructuredText/HACKING.rst.txt new file mode 100644 index 00000000..6e078204 --- /dev/null +++ b/samples/reStructuredText/HACKING.rst.txt @@ -0,0 +1,329 @@ +Contributing to SciPy +===================== + +This document aims to give an overview of how to contribute to SciPy. It +tries to answer commonly asked questions, and provide some insight into how the +community process works in practice. Readers who are familiar with the SciPy +community and are experienced Python coders may want to jump straight to the +`git workflow`_ documentation. + + +Contributing new code +--------------------- + +If you have been working with the scientific Python toolstack for a while, you +probably have some code lying around of which you think "this could be useful +for others too". Perhaps it's a good idea then to contribute it to SciPy or +another open source project. The first question to ask is then, where does +this code belong? That question is hard to answer here, so we start with a +more specific one: *what code is suitable for putting into SciPy?* +Almost all of the new code added to scipy has in common that it's potentially +useful in multiple scientific domains and it fits in the scope of existing +scipy submodules. In principle new submodules can be added too, but this is +far less common. For code that is specific to a single application, there may +be an existing project that can use the code. Some scikits (`scikit-learn`_, +`scikits-image`_, `statsmodels`_, etc.) are good examples here; they have a +narrower focus and because of that more domain-specific code than SciPy. + +Now if you have code that you would like to see included in SciPy, how do you +go about it? After checking that your code can be distributed in SciPy under a +compatible license (see FAQ for details), the first step is to discuss on the +scipy-dev mailing list. All new features, as well as changes to existing code, +are discussed and decided on there. You can, and probably should, already +start this discussion before your code is finished. 
+
+Assuming the outcome of the discussion on the mailing list is positive and you
+have a function or piece of code that does what you need it to do, what next?
+Before code is added to SciPy, it at least has to have good documentation, unit
+tests and correct code style.
+
+1. Unit tests
+   In principle you should aim to create unit tests that exercise all the code
+   that you are adding. This gives some degree of confidence that your code
+   runs correctly, also on Python versions and hardware or OSes that you don't
+   have available yourself. An extensive description of how to write unit
+   tests is given in the NumPy `testing guidelines`_.
+
+2. Documentation
+   Clear and complete documentation is essential in order for users to be able
+   to find and understand the code. Documentation for individual functions
+   and classes -- which includes at least a basic description, type and
+   meaning of all parameters and return values, and usage examples in
+   `doctest`_ format -- is put in docstrings. Those docstrings can be read
+   within the interpreter, and are compiled into a reference guide in html and
+   pdf format. Higher-level documentation for key (areas of) functionality is
+   provided in tutorial format and/or in module docstrings. A guide on how to
+   write documentation is given in `how to document`_.
+
+3. Code style
+   Uniformity of the style in which code is written is important to others
+   trying to understand it. SciPy follows the standard Python guidelines for
+   code style, `PEP8`_. In order to check that your code conforms to PEP8,
+   you can use the `pep8 package`_ style checker. Most IDEs and text editors
+   have settings that can help you follow PEP8, for example by translating
+   tabs into four spaces. Using `pyflakes`_ to check your code is also a good
+   idea.
+
+At the end of this document a checklist is given that may help to check if your
+code fulfills all requirements for inclusion in SciPy.
+
+Another question you may have is: *where exactly do I put my code*? To answer
+this, it is useful to understand how the SciPy public API (application
+programming interface) is defined. For most modules the API is two levels
+deep, which means your new function should appear as
+``scipy.submodule.my_new_func``. ``my_new_func`` can be put in an existing or
+new file under ``scipy/<submodule>/``, its name is added to the ``__all__``
+list in that file (which lists all public functions in the file), and those
+public functions are then imported in ``scipy/<submodule>/__init__.py``. Any
+private functions/classes should have a leading underscore (``_``) in their
+name. A more detailed description of the SciPy public API is given in
+`SciPy API`_.
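+
+As a concrete illustration (the submodule and function names below are made
+up for this sketch, not part of SciPy), exposing a new public function could
+look like::
+
+    # scipy/foo/basic.py
+    __all__ = ['my_new_func']  # public names defined in this file
+
+    def my_new_func(x):
+        """Toy example: return twice the input."""
+        return 2 * x
+
+    def _helper(x):
+        """Private helper; the leading underscore keeps it out of the API."""
+        return x
+
+    # scipy/foo/__init__.py
+    from .basic import *
+
+The function is then importable as ``scipy.foo.my_new_func``.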
+
+Once you think your code is ready for inclusion in SciPy, you can send a pull
+request (PR) on Github. We won't go into the details of how to work with git
+here; this is described well in the `git workflow`_ section of the NumPy
+documentation and in the Github help pages. When you send the PR for a new
+feature, be sure to also mention this on the scipy-dev mailing list. This can
+prompt interested people to help review your PR. Assuming that you already got
+positive feedback before on the general idea of your code/feature, the purpose
+of the code review is to ensure that the code is correct, efficient and meets
+the requirements outlined above. In many cases the code review happens
+relatively quickly, but it's possible that it stalls. If you have addressed
+all feedback already given, it's perfectly fine to ask on the mailing list
+again for review (after a reasonable amount of time, say a couple of weeks, has
+passed). Once the review is completed, the PR is merged into the "master"
+branch of SciPy.
+
+The above describes the requirements and process for adding code to SciPy. It
+doesn't yet answer the question of how exactly decisions are made. The
+basic answer is: decisions are made by consensus, by everyone who chooses to
+participate in the discussion on the mailing list. This includes developers,
+other users and yourself. Aiming for consensus in the discussion is important
+-- SciPy is a project by and for the scientific Python community. In those
+rare cases where agreement cannot be reached, the `maintainers`_ of the module
+in question can decide the issue.
+
+
+Contributing by helping maintain existing code
+----------------------------------------------
+
+The previous section talked specifically about adding new functionality to
+SciPy. A large part of that discussion also applies to maintenance of existing
+code. Maintenance means fixing bugs, improving code quality or style,
+documenting existing functionality better, adding missing unit tests, keeping
+build scripts up-to-date, etc. The SciPy `Trac`_ bug tracker contains all
+reported bugs, build/documentation issues, etc. Fixing issues described in
+Trac tickets helps improve the overall quality of SciPy, and is also a good way
+of getting familiar with the project. You may also want to fix a bug because
+you ran into it and need the function in question to work correctly.
+
+The discussion on code style and unit testing above applies equally to bug
+fixes. It is usually best to start by writing a unit test that shows the
+problem, i.e. one that should pass but currently doesn't. Once you have that,
+you can fix the code so that the test does pass. That should be enough to send
+a PR for the issue. Unlike when adding new code, discussing this on the
+mailing list may not be necessary - if the old behavior of the code is clearly
+incorrect, no one will object to having it fixed. It may be necessary to add
+some warning or deprecation message for the changed behavior. This should be
+part of the review process.
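+
+For example, a test-first bug fix for a hypothetical issue (the function,
+input and expected value here are purely illustrative, not a real ticket)
+could start from a test like::
+
+    from numpy.testing import assert_allclose
+    from scipy import optimize
+
+    def test_minimize_scalar_simple_quadratic():
+        # Hypothetical regression test: the minimum of (x - 2)**2 is at x = 2.
+        res = optimize.minimize_scalar(lambda x: (x - 2.0) ** 2)
+        assert_allclose(res.x, 2.0, rtol=1e-6)
+
+Run it to confirm that it fails, then fix the code until it passes.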
+
+
+Other ways to contribute
+------------------------
+
+There are many ways to contribute other than contributing code. Participating
+in discussions on the scipy-user and scipy-dev *mailing lists* is a contribution
+in itself. The `scipy.org`_ *website* contains a lot of information on the
+SciPy community and can always use a new pair of hands. A redesign of this
+website is ongoing, see `scipy.github.com`_. The redesigned website is a
+static site based on Sphinx; the sources for it are
+also on Github at `scipy.org-new`_.
+
+The SciPy *documentation* is constantly being improved by many developers and
+users. You can contribute by sending a PR on Github that improves the
+documentation, but there's also a `documentation wiki`_ that is very convenient
+for making edits to docstrings (and doesn't require git knowledge). Anyone can
+register a username on that wiki, ask on the scipy-dev mailing list for edit
+rights and make edits. The documentation there is updated every day with the
+latest changes in the SciPy master branch, and wiki edits are regularly
+reviewed and merged into master. Another advantage of the documentation wiki
+is that you can immediately see how the reStructuredText (reST) of docstrings
+and other docs is rendered as html, so you can easily catch formatting errors.
+
+Code that doesn't belong in SciPy itself or in another package but helps users
+accomplish a certain task is valuable. `SciPy Central`_ is the place to share
+this type of code (snippets, examples, plotting code, etc.).
+
+
+Useful links, FAQ, checklist
+----------------------------
+
+Checklist before submitting a PR
+````````````````````````````````
+
+ - Are there unit tests with good code coverage?
+ - Do all public functions have docstrings including examples?
+ - Is the code style correct (PEP8, pyflakes)?
+ - Is the new functionality tagged with ``.. versionadded:: X.Y.Z`` (with
+   X.Y.Z the version number of the next release - can be found in setup.py)?
+ - Is the new functionality mentioned in the release notes of the next
+   release?
+ - Is the new functionality added to the reference guide?
+ - In case of larger additions, is there a tutorial or more extensive
+   module-level description?
+ - In case compiled code is added, is it integrated correctly via setup.py
+   (and preferably also Bento/Numscons configuration files)?
+ - If you are a first-time contributor, did you add yourself to THANKS.txt?
+   Please note that this is perfectly normal and desirable - the aim is to
+   give every single contributor credit, and if you don't add yourself it's
+   simply extra work for the reviewer (or worse, the reviewer may forget).
+ - Did you check that the code can be distributed under a BSD license?
+
+
+Useful SciPy documents
+``````````````````````
+
+ - The `how to document`_ guidelines
+ - NumPy/SciPy `testing guidelines`_
+ - `SciPy API`_
+ - SciPy `maintainers`_
+ - NumPy/SciPy `git workflow`_
+
+
+FAQ
+```
+
+*I based my code on existing Matlab/R/... code I found online, is this OK?*
+
+It depends. SciPy is distributed under a BSD license, so if the code that you
+based your code on is also BSD licensed or has a BSD-compatible license (MIT,
+Apache, ...) then it's OK. Code which is GPL-licensed, has no clear license,
+requires citation or is free for academic use only can't be included in SciPy.
+Therefore if you copied existing code with such a license or made a direct
+translation to Python of it, your code can't be included. See also `license
+compatibility`_.
+
+
+*How do I set up SciPy so I can edit files, run the tests and make commits?*
+
+The simplest method is setting up an in-place build. To create your local git
+repo and do the in-place build::
+
+  $ git clone https://github.com/scipy/scipy.git scipy
+  $ cd scipy
+  $ python setup.py build_ext -i
+
+Then you need to either set up a symlink in your site-packages or add this
+directory to your PYTHONPATH environment variable, so Python can find it. Some
+IDEs (Spyder for example) have utilities to manage PYTHONPATH. On Linux and OS
+X, you can for example edit your .bash_login file to automatically add this dir
+on startup of your terminal.
+Add the line::
+
+  export PYTHONPATH="$HOME/scipy:${PYTHONPATH}"
+
+Alternatively, to set up the symlink, use (the prefix is only necessary if you
+want to use your local instead of the global site-packages dir)::
+
+  $ python setupegg.py develop --prefix=${HOME}
+
+To test that everything works, start the interpreter (not inside the scipy/
+source dir) and run the tests::
+
+  $ python
+  >>> import scipy as sp
+  >>> sp.test()
+
+Now editing a Python source file in SciPy allows you to immediately test and
+use your changes by simply restarting the interpreter.
+
+Note that while the above procedure is the most straightforward way to get
+started, you may want to look into using Bento or numscons for faster and more
+flexible building, or virtualenv to maintain development environments for
+multiple Python versions.
+
+
+*How do I set up a development version of SciPy in parallel to a released
+version that I use to do my job/research?*
+
+One simple way to achieve this is to install the released version in
+site-packages, by using a binary installer or pip for example, and set up the
+development version with an in-place build in a virtualenv. First install
+`virtualenv`_ and `virtualenvwrapper`_, then create your virtualenv (named
+scipy-dev here) with::
+
+  $ mkvirtualenv scipy-dev
+
+Now, whenever you want to switch to the virtual environment, you can use the
+command ``workon scipy-dev``, while the command ``deactivate`` exits from the
+virtual environment and brings back your previous shell. With scipy-dev
+activated, follow the in-place build with the symlink install above to actually
+install your development version of SciPy.
+
+
+*Can I use a programming language other than Python to speed up my code?*
+
+Yes. The languages used in SciPy are Python, Cython, C, C++ and Fortran. All
+of these have their pros and cons. If Python really doesn't offer enough
+performance, one of those languages can be used. Important concerns when
+using compiled languages are maintainability and portability. For
+maintainability, Cython is clearly preferred over C/C++/Fortran. Cython and C
+are more portable than C++/Fortran. A lot of the existing C and Fortran code
+in SciPy is older, battle-tested code that was only wrapped in (but not
+specifically written for) Python/SciPy. Therefore the basic advice is: use
+Cython. If there are specific reasons why C/C++/Fortran should be preferred,
+please discuss those reasons first.
+
+
+*There's overlap between Trac and Github, which do I use for what?*
+
+Trac_ is the bug tracker, Github_ the code repository. Before the SciPy code
+repository moved to Github, the preferred way to contribute code was to create
+a patch and attach it to a Trac ticket. The overhead of this approach is much
+larger than sending a PR on Github, so please don't do this anymore. Use Trac
+for bug reports, Github for patches.
+
+
+.. _scikit-learn: http://scikit-learn.org
+
+.. _scikits-image: http://scikits-image.org/
+
+.. _statsmodels: http://statsmodels.sourceforge.net/
+
+.. _testing guidelines: https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt
+
+.. _how to document: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+
+.. _PEP8: http://www.python.org/dev/peps/pep-0008/
+
+.. _pep8 package: http://pypi.python.org/pypi/pep8
+
+.. _pyflakes: http://pypi.python.org/pypi/pyflakes
+
+.. _SciPy API: http://docs.scipy.org/doc/scipy/reference/api.html
+
+.. _git workflow: http://docs.scipy.org/doc/numpy/dev/gitwash/index.html
+
+..
_maintainers: https://github.com/scipy/scipy/blob/master/doc/MAINTAINERS.rst.txt + +.. _Trac: http://projects.scipy.org/scipy/timeline + +.. _Github: https://github.com/scipy/scipy + +.. _scipy.org: http://scipy.org/ + +.. _scipy.github.com: http://scipy.github.com/ + +.. _scipy.org-new: https://github.com/scipy/scipy.org-new + +.. _documentation wiki: http://docs.scipy.org/scipy/Front%20Page/ + +.. _SciPy Central: http://scipy-central.org/ + +.. _license compatibility: http://www.scipy.org/License_Compatibility + +.. _doctest: http://www.doughellmann.com/PyMOTW/doctest/ + +.. _virtualenv: http://www.virtualenv.org/ + +.. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/ + diff --git a/test/fixtures/Data/Modelines/ruby4 b/test/fixtures/Data/Modelines/ruby4 new file mode 100644 index 00000000..e3b50151 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby4 @@ -0,0 +1,3 @@ +# vim: filetype=ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby5 b/test/fixtures/Data/Modelines/ruby5 new file mode 100644 index 00000000..10349050 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby5 @@ -0,0 +1,3 @@ +# vim: ft=ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby6 b/test/fixtures/Data/Modelines/ruby6 new file mode 100644 index 00000000..a2b49dae --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby6 @@ -0,0 +1,3 @@ +# vim: syntax=Ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby7 b/test/fixtures/Data/Modelines/ruby7 new file mode 100644 index 00000000..1ed5b28f --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby7 @@ -0,0 +1,3 @@ +# vim: se syntax=ruby: + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby8 b/test/fixtures/Data/Modelines/ruby8 new file mode 100644 index 00000000..8e854741 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby8 @@ -0,0 +1,3 @@ +# vim: set syntax=ruby: + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby9 b/test/fixtures/Data/Modelines/ruby9 new file mode 100644 index 00000000..ac82358d --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby9 @@ -0,0 +1,3 @@ +# ex: syntax=ruby + +# I am Ruby diff --git a/test/test_generated.rb b/test/test_generated.rb index 164e1fc2..591f50ed 100644 --- a/test/test_generated.rb +++ b/test/test_generated.rb @@ -69,5 +69,11 @@ class TestGenerated < Minitest::Test # Specflow generated_fixture_without_loading_data("Features/BindingCulture.feature.cs") + + # JFlex + generated_sample_loading_data("Java/JFlexLexer.java") + + # GrammarKit + generated_sample_loading_data("Java/GrammarKit.java") end end diff --git a/test/test_grammars.rb b/test/test_grammars.rb index a2ad1e53..29211cc2 100644 --- a/test/test_grammars.rb +++ b/test/test_grammars.rb @@ -113,6 +113,20 @@ class TestGrammars < Minitest::Test assert_equal [], licensed, msg end + def test_submodules_use_https_links + File.open(".gitmodules", "r") do |fh| + ssh_submodules = [] + fh.each_line do |line| + if matches = line.match(/url = (git@.*)/) + submodule_link = matches.captures[0] + ssh_submodules.push(submodule_link) + end + end + msg = "The following submodules don't have an HTTPS link:\n* #{ssh_submodules.join("\n* ")}" + assert_equal [], ssh_submodules, msg + end + end + private def submodule_paths diff --git a/test/test_heuristics.rb b/test/test_heuristics.rb index 524a522a..702320e9 100644 --- a/test/test_heuristics.rb +++ b/test/test_heuristics.rb @@ -155,6 +155,14 @@ class TestHeuristcs < Minitest::Test }) end + # Candidate languages = ["Pod", "Perl"] + def test_pod_by_heuristics + 
assert_heuristics({ + "Perl" => all_fixtures("Perl", "*.pod"), + "Pod" => all_fixtures("Pod", "*.pod") + }) + end + # Candidate languages = ["IDL", "Prolog", "QMake", "INI"] def test_pro_by_heuristics assert_heuristics({ diff --git a/test/test_modelines.rb b/test/test_modelines.rb index fd259782..85718955 100644 --- a/test/test_modelines.rb +++ b/test/test_modelines.rb @@ -11,6 +11,12 @@ class TestModelines < Minitest::Test assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby") assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby2") assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby3") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby4") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby5") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby6") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby7") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby8") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby9") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplus") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs1") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs2") diff --git a/vendor/grammars/AutoHotkey b/vendor/grammars/AutoHotkey index 77b8f2d7..4da62de2 160000 --- a/vendor/grammars/AutoHotkey +++ b/vendor/grammars/AutoHotkey @@ -1 +1 @@ -Subproject commit 77b8f2d7857e9251e3a6b9047f3eca5f76f0be43 +Subproject commit 4da62de23dc705bf9b95e76cf5e8e51aa1e68fea diff --git a/vendor/grammars/Elm.tmLanguage b/vendor/grammars/Elm.tmLanguage index ab79692f..437033bd 160000 --- a/vendor/grammars/Elm.tmLanguage +++ b/vendor/grammars/Elm.tmLanguage @@ -1 +1 @@ -Subproject commit ab79692fed628c9b08b7d11f21d9288cdf6b4e84 +Subproject commit 437033bd48350b49bc0dfa734206bfa0ba5de337 diff --git a/vendor/grammars/Handlebars b/vendor/grammars/Handlebars index 60309828..2e9f6884 160000 --- a/vendor/grammars/Handlebars +++ b/vendor/grammars/Handlebars @@ -1 +1 @@ -Subproject commit 6030982880f95b887daebf1ccb30ad1c615e0fc2 +Subproject commit 2e9f68840073f5a3de13cbfed10e31b199760654 diff --git a/vendor/grammars/MagicPython b/vendor/grammars/MagicPython new file mode 160000 index 00000000..82c76aff --- /dev/null +++ b/vendor/grammars/MagicPython @@ -0,0 +1 @@ +Subproject commit 82c76aff704192fb9ed1f505360635f575f13b5a diff --git a/vendor/grammars/NimLime b/vendor/grammars/NimLime index 4e60e565..4db349dd 160000 --- a/vendor/grammars/NimLime +++ b/vendor/grammars/NimLime @@ -1 +1 @@ -Subproject commit 4e60e5657fcddde6bf8b4bce0030ecb3154e7dfa +Subproject commit 4db349dda5219a37e99a0375e2a5d8a001fbf20e diff --git a/vendor/grammars/Sublime-Inform b/vendor/grammars/Sublime-Inform deleted file mode 160000 index 8db129b8..00000000 --- a/vendor/grammars/Sublime-Inform +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 8db129b8389044a6660ca232566651c8fe3ab646 diff --git a/vendor/grammars/SublimePapyrus b/vendor/grammars/SublimePapyrus index 75d567dc..293a45f6 160000 --- a/vendor/grammars/SublimePapyrus +++ b/vendor/grammars/SublimePapyrus @@ -1 +1 @@ -Subproject commit 75d567dc3a890acbc6edebedad2879b0af426766 +Subproject commit 293a45f665de3fb4e1eaaf37bf152b3861a70d7a diff --git a/vendor/grammars/abap.tmbundle b/vendor/grammars/abap.tmbundle index 5b7e30fd..e4e71dee 160000 --- a/vendor/grammars/abap.tmbundle +++ b/vendor/grammars/abap.tmbundle @@ -1 +1 @@ -Subproject commit 
5b7e30fd8fb4bcaa78c14aaabebe98dda8ab7bbf +Subproject commit e4e71dee51b1fe6973e11d6a617de72991be4ecb diff --git a/vendor/grammars/atom-fsharp b/vendor/grammars/atom-fsharp index 500cdb5f..13eb4d43 160000 --- a/vendor/grammars/atom-fsharp +++ b/vendor/grammars/atom-fsharp @@ -1 +1 @@ -Subproject commit 500cdb5f045ff6a121fa2732bbd56c4da18bdae1 +Subproject commit 13eb4d4347be4e8dd825ba174e4271f9cb0f9cab diff --git a/vendor/grammars/atom-language-purescript b/vendor/grammars/atom-language-purescript index 21ed0c41..bd2b59f1 160000 --- a/vendor/grammars/atom-language-purescript +++ b/vendor/grammars/atom-language-purescript @@ -1 +1 @@ -Subproject commit 21ed0c417a143ca537afc5d65344acac3d88df94 +Subproject commit bd2b59f14eb4bd3b1f6d53eee959d7a6b523c073 diff --git a/vendor/grammars/atom-language-stan b/vendor/grammars/atom-language-stan new file mode 160000 index 00000000..f8d855ea --- /dev/null +++ b/vendor/grammars/atom-language-stan @@ -0,0 +1 @@ +Subproject commit f8d855eab960b4dd140c0f469a809401544850b8 diff --git a/vendor/grammars/ats.sublime b/vendor/grammars/ats.sublime index 1ecc2b01..a3f24abb 160000 --- a/vendor/grammars/ats.sublime +++ b/vendor/grammars/ats.sublime @@ -1 +1 @@ -Subproject commit 1ecc2b011ed9d58bd5435b7440697e300591a23d +Subproject commit a3f24abbe7043adc0ad798711467edae33cf89f0 diff --git a/vendor/grammars/c.tmbundle b/vendor/grammars/c.tmbundle index f6048afe..5b5d3663 160000 --- a/vendor/grammars/c.tmbundle +++ b/vendor/grammars/c.tmbundle @@ -1 +1 @@ -Subproject commit f6048afe693e50adf47d46aba791f95c9138823e +Subproject commit 5b5d366309e8a549ce6fff5bcd9aa57ffb6dae77 diff --git a/vendor/grammars/chapel-tmbundle b/vendor/grammars/chapel-tmbundle index b657738c..469476b2 160000 --- a/vendor/grammars/chapel-tmbundle +++ b/vendor/grammars/chapel-tmbundle @@ -1 +1 @@ -Subproject commit b657738cc05cddd8d8a20afa307e93e607d17d97 +Subproject commit 469476b285adf6c4a09973fd12e97ec831afd050 diff --git a/vendor/grammars/d.tmbundle b/vendor/grammars/d.tmbundle index e6551b27..bc27d5e5 160000 --- a/vendor/grammars/d.tmbundle +++ b/vendor/grammars/d.tmbundle @@ -1 +1 @@ -Subproject commit e6551b27e30960a272758afb7da323aa402d70e6 +Subproject commit bc27d5e552b28eaceb76cfe93bd7c27a1105b57d diff --git a/vendor/grammars/elixir-tmbundle b/vendor/grammars/elixir-tmbundle index 4b502e43..a7aa16e3 160000 --- a/vendor/grammars/elixir-tmbundle +++ b/vendor/grammars/elixir-tmbundle @@ -1 +1 @@ -Subproject commit 4b502e436d150f6af97b085a763179941b3c3778 +Subproject commit a7aa16e349f673d70dd43fcab72e2a3f219cb9d7 diff --git a/vendor/grammars/factor b/vendor/grammars/factor index 9d956025..97d1ec75 160000 --- a/vendor/grammars/factor +++ b/vendor/grammars/factor @@ -1 +1 @@ -Subproject commit 9d95602591cf231a3f2332269d81d60068d1aa76 +Subproject commit 97d1ec759eb9fa2ace83c62685b6b36faec05981 diff --git a/vendor/grammars/gap-tmbundle b/vendor/grammars/gap-tmbundle index ce4ff698..52c8fafb 160000 --- a/vendor/grammars/gap-tmbundle +++ b/vendor/grammars/gap-tmbundle @@ -1 +1 @@ -Subproject commit ce4ff698ebb4a692f0b9a3ab5e30853b87fd2b34 +Subproject commit 52c8fafb664fb7909223f92403e26fe3bfde0cdc diff --git a/vendor/grammars/html.tmbundle b/vendor/grammars/html.tmbundle index a0bc0c47..7710c79b 160000 --- a/vendor/grammars/html.tmbundle +++ b/vendor/grammars/html.tmbundle @@ -1 +1 @@ -Subproject commit a0bc0c479bde0d46bfec863f3262c06e768eec2d +Subproject commit 7710c79b161314fe937b05b13b1101e78f5dc62e diff --git a/vendor/grammars/json.tmbundle b/vendor/grammars/json.tmbundle index 
06b38d55..0762cbdc 160000 --- a/vendor/grammars/json.tmbundle +++ b/vendor/grammars/json.tmbundle @@ -1 +1 @@ -Subproject commit 06b38d55326363b63c26943c6e0213988180dbca +Subproject commit 0762cbdcb34dd98801b6323e75332cd4c9dbc07e diff --git a/vendor/grammars/language-babel b/vendor/grammars/language-babel index 4b0e9658..e2fd09d7 160000 --- a/vendor/grammars/language-babel +++ b/vendor/grammars/language-babel @@ -1 +1 @@ -Subproject commit 4b0e9658e0a8bab71983ea5941f99b552cf00848 +Subproject commit e2fd09d7d9caf1f336b3a35581e55387723dcbf3 diff --git a/vendor/grammars/language-clojure b/vendor/grammars/language-clojure index 12b73d41..a0193ad2 160000 --- a/vendor/grammars/language-clojure +++ b/vendor/grammars/language-clojure @@ -1 +1 @@ -Subproject commit 12b73d41a0e0437b4899f73c5ac65caad74ac2c1 +Subproject commit a0193ad2a9797033649e665083f09249d2d098fc diff --git a/vendor/grammars/language-coffee-script b/vendor/grammars/language-coffee-script index afa4f6f1..93e935ec 160000 --- a/vendor/grammars/language-coffee-script +++ b/vendor/grammars/language-coffee-script @@ -1 +1 @@ -Subproject commit afa4f6f157e02fd1a635357e8117e4c726a84542 +Subproject commit 93e935ec1d54815c182d683d3dfb9583db4e269b diff --git a/vendor/grammars/language-crystal b/vendor/grammars/language-crystal index 5c0e60fd..f87d6864 160000 --- a/vendor/grammars/language-crystal +++ b/vendor/grammars/language-crystal @@ -1 +1 @@ -Subproject commit 5c0e60fd5323b5f093317ffda9721db3fcd72672 +Subproject commit f87d68645c9a4243a7942e6b414449f1eec8a3e7 diff --git a/vendor/grammars/language-gfm b/vendor/grammars/language-gfm index e5b24d57..298a8a3e 160000 --- a/vendor/grammars/language-gfm +++ b/vendor/grammars/language-gfm @@ -1 +1 @@ -Subproject commit e5b24d57e73463f747fae061d6e9d59a74b9f927 +Subproject commit 298a8a3eb180f1fa6b8a8bc77c2147e355c8cafd diff --git a/vendor/grammars/language-inform7 b/vendor/grammars/language-inform7 new file mode 160000 index 00000000..857864cd --- /dev/null +++ b/vendor/grammars/language-inform7 @@ -0,0 +1 @@ +Subproject commit 857864cd812279672284f424da4f65e3c81b6c87 diff --git a/vendor/grammars/language-javascript b/vendor/grammars/language-javascript index b227486f..f68e4bfe 160000 --- a/vendor/grammars/language-javascript +++ b/vendor/grammars/language-javascript @@ -1 +1 @@ -Subproject commit b227486fc84c8af8f7439e96424c2f3724c26e44 +Subproject commit f68e4bfe54a3b9d16450223f401d2fb16453897f diff --git a/vendor/grammars/language-jsoniq b/vendor/grammars/language-jsoniq index 7a971acf..008a2458 160000 --- a/vendor/grammars/language-jsoniq +++ b/vendor/grammars/language-jsoniq @@ -1 +1 @@ -Subproject commit 7a971acf1c2001e6b2e9afc7b69b6ff8a2ae39ce +Subproject commit 008a24589d32a8f8bbd55985d27844a7cda842e5 diff --git a/vendor/grammars/language-less b/vendor/grammars/language-less new file mode 160000 index 00000000..a4ded260 --- /dev/null +++ b/vendor/grammars/language-less @@ -0,0 +1 @@ +Subproject commit a4ded2608cd22b8dba79df2025ad8dcf62158ddb diff --git a/vendor/grammars/language-maxscript b/vendor/grammars/language-maxscript index a465c9ca..56f89d27 160000 --- a/vendor/grammars/language-maxscript +++ b/vendor/grammars/language-maxscript @@ -1 +1 @@ -Subproject commit a465c9ca4adf71b8524021acb3cbe447db19753b +Subproject commit 56f89d27153d27f3fed5f8b50fda8fa728fa2911 diff --git a/vendor/grammars/language-python b/vendor/grammars/language-python index cdb699e7..75f0d2b0 160000 --- a/vendor/grammars/language-python +++ b/vendor/grammars/language-python @@ -1 +1 @@ -Subproject commit 
cdb699e7a86fd9f9f84ae561abddb696aad777aa +Subproject commit 75f0d2b06122a51db6e8e0b129b57585cd68f99c diff --git a/vendor/grammars/language-renpy b/vendor/grammars/language-renpy index 00e92d74..cc2f1c69 160000 --- a/vendor/grammars/language-renpy +++ b/vendor/grammars/language-renpy @@ -1 +1 @@ -Subproject commit 00e92d7450a97c33b40931113b64034bac27e010 +Subproject commit cc2f1c69f0b1c1d121aa5648422fc70d86dca7cf diff --git a/vendor/grammars/language-yaml b/vendor/grammars/language-yaml index e1d62e5a..249fdeed 160000 --- a/vendor/grammars/language-yaml +++ b/vendor/grammars/language-yaml @@ -1 +1 @@ -Subproject commit e1d62e5aff1c475ea3eedc3b03a52ce0e750ec89 +Subproject commit 249fdeed7877ccdcba123645f32cc6597bce4b37 diff --git a/vendor/grammars/language-yang b/vendor/grammars/language-yang new file mode 160000 index 00000000..c2d4a701 --- /dev/null +++ b/vendor/grammars/language-yang @@ -0,0 +1 @@ +Subproject commit c2d4a701d15e0ea7a1cef4f2fe4396b336504d7c diff --git a/vendor/grammars/latex.tmbundle b/vendor/grammars/latex.tmbundle index bb4edc2b..82986b93 160000 --- a/vendor/grammars/latex.tmbundle +++ b/vendor/grammars/latex.tmbundle @@ -1 +1 @@ -Subproject commit bb4edc2b6af0d95c2084511ce3afc324a9c83da5 +Subproject commit 82986b93a4f4ae7aab52445d8b7742b9af635d05 diff --git a/vendor/grammars/less.tmbundle b/vendor/grammars/less.tmbundle deleted file mode 160000 index 7ef97ad5..00000000 --- a/vendor/grammars/less.tmbundle +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7ef97ad5f15d2a136afe4d5cf568fc8ee79675b7 diff --git a/vendor/grammars/make.tmbundle b/vendor/grammars/make.tmbundle index e0d96dc1..1a1827da 160000 --- a/vendor/grammars/make.tmbundle +++ b/vendor/grammars/make.tmbundle @@ -1 +1 @@ -Subproject commit e0d96dc1d6ec8ef9ee421da9963d231256a2f22d +Subproject commit 1a1827da81e20fdce56e2658451340c070ca44b7 diff --git a/vendor/grammars/objective-c.tmbundle b/vendor/grammars/objective-c.tmbundle index 1bade8a1..fdcedb95 160000 --- a/vendor/grammars/objective-c.tmbundle +++ b/vendor/grammars/objective-c.tmbundle @@ -1 +1 @@ -Subproject commit 1bade8a1c919c358fc4a6d83ba93e98e419ffede +Subproject commit fdcedb95de8846220c49f769fee91045188767d9 diff --git a/vendor/grammars/perl6fe b/vendor/grammars/perl6fe new file mode 160000 index 00000000..12fa46f7 --- /dev/null +++ b/vendor/grammars/perl6fe @@ -0,0 +1 @@ +Subproject commit 12fa46f7bbf646616df1120ed8cfee3e1fcb75de diff --git a/vendor/grammars/php.tmbundle b/vendor/grammars/php.tmbundle index 2ecaa60d..3ed4837b 160000 --- a/vendor/grammars/php.tmbundle +++ b/vendor/grammars/php.tmbundle @@ -1 +1 @@ -Subproject commit 2ecaa60d92b92d4c07f243207ba1d5b2114bb70a +Subproject commit 3ed4837b43d3f650ebb525b068636281942883a0 diff --git a/vendor/grammars/powershell b/vendor/grammars/powershell index 982ae21d..b4ea52c5 160000 --- a/vendor/grammars/powershell +++ b/vendor/grammars/powershell @@ -1 +1 @@ -Subproject commit 982ae21d54b3affc498131515ebbfca6b186ac16 +Subproject commit b4ea52c51c5111fdda1d24103082d9580472c31b diff --git a/vendor/grammars/sas.tmbundle b/vendor/grammars/sas.tmbundle index 30fa23fc..3759a197 160000 --- a/vendor/grammars/sas.tmbundle +++ b/vendor/grammars/sas.tmbundle @@ -1 +1 @@ -Subproject commit 30fa23fc34cf5147bcfd0759a4cbf83cd987337d +Subproject commit 3759a19719d3c4c4979087be12adbcaa02a7bca3 diff --git a/vendor/grammars/sql.tmbundle b/vendor/grammars/sql.tmbundle index 1fd3f03f..6d4edbc1 160000 --- a/vendor/grammars/sql.tmbundle +++ b/vendor/grammars/sql.tmbundle @@ -1 +1 @@ -Subproject commit 
1fd3f03f97e37fde2c7f2dd4c11728a19242d320 +Subproject commit 6d4edbc113d3272f7c097d6b1504624289ee2bc5 diff --git a/vendor/grammars/sublime-mask b/vendor/grammars/sublime-mask index 819f743e..8b2a4b33 160000 --- a/vendor/grammars/sublime-mask +++ b/vendor/grammars/sublime-mask @@ -1 +1 @@ -Subproject commit 819f743efdcfa0d90a5b935cfaad799bd4ca7e6a +Subproject commit 8b2a4b3300ef30f293218521cd2aa94335cad114 diff --git a/vendor/grammars/sublime-nginx b/vendor/grammars/sublime-nginx index ba28d29d..fcf644ec 160000 --- a/vendor/grammars/sublime-nginx +++ b/vendor/grammars/sublime-nginx @@ -1 +1 @@ -Subproject commit ba28d29de729f01d39a29fa214a6818112b48803 +Subproject commit fcf644ecea021ab8a6bc171f415f8df0b005b31e diff --git a/vendor/grammars/sublime-pony b/vendor/grammars/sublime-pony index 384ba3ed..b5eb8f8e 160000 --- a/vendor/grammars/sublime-pony +++ b/vendor/grammars/sublime-pony @@ -1 +1 @@ -Subproject commit 384ba3ed980189f0cc140d3fb86455f880fffeb0 +Subproject commit b5eb8f8e97e5253de8d81cfa4826cfb5815f2944 diff --git a/vendor/grammars/sublime-rust b/vendor/grammars/sublime-rust index 06a278cc..621e4f61 160000 --- a/vendor/grammars/sublime-rust +++ b/vendor/grammars/sublime-rust @@ -1 +1 @@ -Subproject commit 06a278ccfaf5e542d26a95d66c734b4407bc4912 +Subproject commit 621e4f6117531d8fe299eb5584a6be766df1822e diff --git a/vendor/grammars/sublime-text-ox b/vendor/grammars/sublime-text-ox index ed96fb6a..5cbc2c65 160000 --- a/vendor/grammars/sublime-text-ox +++ b/vendor/grammars/sublime-text-ox @@ -1 +1 @@ -Subproject commit ed96fb6afc0321c7d3ce219d69b56c591f0938a0 +Subproject commit 5cbc2c655b4b771d34b75bbe962964455510644b diff --git a/vendor/grammars/sublime-typescript b/vendor/grammars/sublime-typescript index 6540de45..26fd717a 160000 --- a/vendor/grammars/sublime-typescript +++ b/vendor/grammars/sublime-typescript @@ -1 +1 @@ -Subproject commit 6540de452eb08766d379fe10aba4bce9eb645ec0 +Subproject commit 26fd717a79d1984e76bbe6d958c5c4bbf0179049 diff --git a/vendor/grammars/sublime_cobol b/vendor/grammars/sublime_cobol index 19ae2ba3..3d2b6dbc 160000 --- a/vendor/grammars/sublime_cobol +++ b/vendor/grammars/sublime_cobol @@ -1 +1 @@ -Subproject commit 19ae2ba33ebc0b439051a499888acd4f916f83b9 +Subproject commit 3d2b6dbcd1b27023150ff9d8ab47953706d070b8 diff --git a/vendor/grammars/swift.tmbundle b/vendor/grammars/swift.tmbundle index 75a3dea1..32141e91 160000 --- a/vendor/grammars/swift.tmbundle +++ b/vendor/grammars/swift.tmbundle @@ -1 +1 @@ -Subproject commit 75a3dea1841cd153c71101fa30841a269ab3c6a7 +Subproject commit 32141e91eeffbbd29482d5b1a0131eec4613e73e diff --git a/vendor/grammars/verilog.tmbundle b/vendor/grammars/verilog.tmbundle index 7627ae50..f55a9859 160000 --- a/vendor/grammars/verilog.tmbundle +++ b/vendor/grammars/verilog.tmbundle @@ -1 +1 @@ -Subproject commit 7627ae507278edd9534c3f905d48736875658bf5 +Subproject commit f55a9859f48bb6189992b86748c127044131a785 diff --git a/vendor/grammars/vue-syntax-highlight b/vendor/grammars/vue-syntax-highlight index a2336ddc..f20c9bab 160000 --- a/vendor/grammars/vue-syntax-highlight +++ b/vendor/grammars/vue-syntax-highlight @@ -1 +1 @@ -Subproject commit a2336ddc7ea01876caa31793f806250cad3b53e6 +Subproject commit f20c9bab7e71738f421e6edc1aab8839ee05d85a