From 1b327e29baf00b63c34a00f4118141fb30827d8c Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Tue, 1 Sep 2015 21:51:48 +0300 Subject: [PATCH 01/68] Add colors for some languages. These are effected languages and the reason behind the proposed colors: - Ant Build System: The color of logo (http://ant.apache.org/images/project-logo.gif). - AppleScript: AppleScript editor logo (https://en.wikipedia.org/wiki/AppleScript#/media/File:AppleScript_Editor_Logo.png). - Batchfile: Batch file icon in Windows (https://en.wikipedia.org/wiki/Batch_file#/media/File:Batch_file_icon_in_Windows_Vista.png). - Bison: A color taken from a bison (https://en.wikipedia.org/wiki/Bison#/media/File:Americanbison.jpg). - Cucumber: Official logo color (https://cucumber.io/images/cucumber-logo.svg). - Cuda: Nvidia(creator of CUDA) logo color (http://www.nvidia.com/content/includes/redesign2010/images/redesign10/nvidia_logo.png). - Gradle: Official Gradle logo color (https://gradle.org/wp-content/uploads/2015/03/GradleLogoReg.png). - Hack: Hack logo color (http://hacklang.org/wp-content/themes/hack/hack.png). - Haml: Haml logo color (http://haml.info/images/haml.png). - LLVM: Eye color of the dragon logo of LLVM (http://llvm.org/img/DragonMedium.png). - Less: Less logo color (http://lesscss.org/public/img/logo.png). - Markdown: The Daring Fireball logo color (http://daringfireball.net/graphics/logos/). - Maven POM: The maven logo color (https://en.wikipedia.org/wiki/Apache_Maven#/media/File:Maven_logo.svg). - Nginx: The nginx logo color (http://nginx.org/nginx.png). - NumPy: The NumPy logo color (http://www.numpy.org/_static/numpy_logo.png). - RDoc: I couldn'd find any logo, so have used the color of the name of RDoc in the official site (http://docs.seattlerb.org/rdoc/). - SCSS: The Sass logo color (http://sass-lang.com/assets/img/logos/logo-b6e1ef6e.svg). - Sass: The Sass logo color (http://sass-lang.com/assets/img/logos/logo-b6e1ef6e.svg). - XML: A random color. - XSLT: A random color. 
- YAML: The color of the name of YAML in the official site (http://yaml.org/). - Yacc: A random color. - reStructuredText: The official logo color (http://docutils.sourceforge.net/rst.png). --- lib/linguist/languages.yml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 4d83e963..ae0584a1 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -153,6 +153,7 @@ Ant Build System: - ant.xml - build.xml ace_mode: xml + color: "#A82C7C" ApacheConf: type: markup @@ -182,6 +183,7 @@ AppleScript: interpreters: - osascript ace_mode: applescript + color: "#F2F1F1" Arc: type: programming @@ -289,6 +291,7 @@ Batchfile: - .cmd tm_scope: source.dosbatch ace_mode: batchfile + color: "#92C2FF" Befunge: type: programming @@ -303,6 +306,7 @@ Bison: extensions: - .bison ace_mode: text + color: "#6A463F" BitBake: type: programming @@ -690,6 +694,7 @@ Cucumber: aliases: - gherkin ace_mode: text + color: "#00A818" Cuda: type: programming @@ -698,6 +703,7 @@ Cuda: - .cuh tm_scope: source.cuda-c++ ace_mode: c_cpp + color: "#76B900" Cycript: type: programming @@ -1204,6 +1210,7 @@ Gradle: - .gradle tm_scope: source.groovy.gradle ace_mode: text + color: "#84BA40" Grammatical Framework: type: programming @@ -1355,6 +1362,7 @@ Hack: - .hh - .php tm_scope: text.html.php + color: "#878787" Haml: group: HTML @@ -1363,6 +1371,7 @@ Haml: - .haml - .haml.deface ace_mode: haml + color: "#ECE2A9" Handlebars: type: markup @@ -1705,6 +1714,7 @@ LLVM: extensions: - .ll ace_mode: text + color: "#689DD7" LOLCODE: type: programming @@ -1768,6 +1778,7 @@ Less: - .less tm_scope: source.css.less ace_mode: less + color: "#2A4D82" Lex: type: programming @@ -1977,6 +1988,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm + color: "#DDDDDD" Mask: type: markup @@ -2015,6 +2027,7 @@ Maven POM: filenames: - pom.xml ace_mode: xml + color: "#FF6804" Max: type: programming @@ -2204,6 +2217,7 @@ Nginx: aliases: - 
nginx configuration file ace_mode: text + color: "#009900" Nimrod: type: programming @@ -2262,6 +2276,7 @@ NumPy: - .numsc tm_scope: none ace_mode: text + color: "#378EC8" OCaml: type: programming @@ -2794,6 +2809,7 @@ RDoc: extensions: - .rdoc tm_scope: text.rdoc + color: "#333333" REALbasic: type: programming @@ -2991,6 +3007,7 @@ SCSS: ace_mode: scss extensions: - .scss + color: "#CF649A" SMT: type: programming @@ -3093,6 +3110,7 @@ Sass: extensions: - .sass ace_mode: sass + color: "#CF649A" Scala: type: programming @@ -3676,6 +3694,7 @@ XML: - Web.Release.config - Web.config - packages.config + color: "#25AAE2" XPages: type: programming @@ -3721,6 +3740,7 @@ XSLT: - .xsl tm_scope: text.xml.xsl ace_mode: xml + color: "#0E76BD" Xojo: type: programming @@ -3753,6 +3773,7 @@ YAML: - .yaml - .yaml-tmlanguage ace_mode: yaml + color: "#FF0000" Yacc: type: programming @@ -3762,6 +3783,7 @@ Yacc: - .yy tm_scope: source.bison ace_mode: text + color: "#92278F" Zephir: type: programming @@ -3844,6 +3866,7 @@ reStructuredText: - .rst - .rest ace_mode: text + color: "#000000" wisp: type: programming From 9e3d8ac4e919d1eff026e94cbb9c69a21a25f9d0 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 09:58:14 +0300 Subject: [PATCH 02/68] Update Batchfile color. --- lib/linguist/languages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index ae0584a1..157ba092 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -291,7 +291,7 @@ Batchfile: - .cmd tm_scope: source.dosbatch ace_mode: batchfile - color: "#92C2FF" + color: "#A3D300" Befunge: type: programming From 827ad80311fe24b4cabd76769c8ccff584a31557 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 10:16:37 +0300 Subject: [PATCH 03/68] Update colors to fix collision. Colors basically incremented. 
--- lib/linguist/languages.yml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 157ba092..be44415d 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -631,7 +631,7 @@ Common Lisp: Component Pascal: type: programming - color: "#b0ce4e" + color: "#c1df5f" extensions: - .cp - .cps @@ -703,7 +703,7 @@ Cuda: - .cuh tm_scope: source.cuda-c++ ace_mode: c_cpp - color: "#76B900" + color: "#87CA11" Cycript: type: programming @@ -1210,7 +1210,7 @@ Gradle: - .gradle tm_scope: source.groovy.gradle ace_mode: text - color: "#84BA40" + color: "#95CB51" Grammatical Framework: type: programming @@ -1714,7 +1714,7 @@ LLVM: extensions: - .ll ace_mode: text - color: "#689DD7" + color: "#79AEE8" LOLCODE: type: programming @@ -1778,7 +1778,7 @@ Less: - .less tm_scope: source.css.less ace_mode: less - color: "#2A4D82" + color: "#3B5E93" Lex: type: programming @@ -1988,7 +1988,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm - color: "#DDDDDD" + color: "#EEEEEE" Mask: type: markup @@ -2027,7 +2027,7 @@ Maven POM: filenames: - pom.xml ace_mode: xml - color: "#FF6804" + color: "#007915" Max: type: programming @@ -2217,7 +2217,7 @@ Nginx: aliases: - nginx configuration file ace_mode: text - color: "#009900" + color: "#11AA11" Nimrod: type: programming @@ -2276,7 +2276,7 @@ NumPy: - .numsc tm_scope: none ace_mode: text - color: "#378EC8" + color: "#489FD9" OCaml: type: programming @@ -2779,7 +2779,7 @@ QMake: R: type: programming - color: "#198ce7" + color: "#2A9DF8" aliases: - R - Rscript @@ -3694,7 +3694,7 @@ XML: - Web.Release.config - Web.config - packages.config - color: "#25AAE2" + color: "#36BBF3" XPages: type: programming @@ -3740,7 +3740,7 @@ XSLT: - .xsl tm_scope: text.xml.xsl ace_mode: xml - color: "#0E76BD" + color: "#1F87CE" Xojo: type: programming @@ -3773,7 +3773,7 @@ YAML: - .yaml - .yaml-tmlanguage ace_mode: yaml - color: "#FF0000" + 
color: "#001111" Yacc: type: programming @@ -3783,7 +3783,7 @@ Yacc: - .yy tm_scope: source.bison ace_mode: text - color: "#92278F" + color: "#A33890" Zephir: type: programming @@ -3866,7 +3866,7 @@ reStructuredText: - .rst - .rest ace_mode: text - color: "#000000" + color: "#444444" wisp: type: programming From dc41dd888d4f6b3754fe6461d683360d2ed212d4 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 13:31:51 +0300 Subject: [PATCH 04/68] Update Cuda color to fix closeness. --- lib/linguist/languages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index be44415d..3e0d1f8c 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -703,7 +703,7 @@ Cuda: - .cuh tm_scope: source.cuda-c++ ace_mode: c_cpp - color: "#87CA11" + color: "#98DB22" Cycript: type: programming From 9a76cfc85f8a30a79bcedefcb66e4f676f8a7b5a Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 14:16:12 +0300 Subject: [PATCH 05/68] Update some colors to fix closeness. I have used syntactic color values with respect to the table below: A | B C | D | E | F G | H | I | J K | L M | N O | P Q | R S | T U | V W | X | Y Z 0 1 2 3 4 5 6 7 8 9 A B C D E F For example for Gradle the color should be 4B0283, for Cuda the color should be 1C201C(for six-letter CudaCu) and for reStructuredText the color should be B3BCBC(for six-letter reStru). 
--- lib/linguist/languages.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 3e0d1f8c..40ac5b40 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -703,7 +703,7 @@ Cuda: - .cuh tm_scope: source.cuda-c++ ace_mode: c_cpp - color: "#98DB22" + color: "#1C201C" Cycript: type: programming @@ -1210,7 +1210,7 @@ Gradle: - .gradle tm_scope: source.groovy.gradle ace_mode: text - color: "#95CB51" + color: "#4B0283" Grammatical Framework: type: programming @@ -1778,7 +1778,7 @@ Less: - .less tm_scope: source.css.less ace_mode: less - color: "#3B5E93" + color: "#83BB83" Lex: type: programming @@ -1988,7 +1988,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm - color: "#EEEEEE" + color: "#80B729" Mask: type: markup @@ -2217,7 +2217,7 @@ Nginx: aliases: - nginx configuration file ace_mode: text - color: "#11AA11" + color: "#9469E9" Nimrod: type: programming @@ -2276,7 +2276,7 @@ NumPy: - .numsc tm_scope: none ace_mode: text - color: "#489FD9" + color: "#9C8AF9" OCaml: type: programming @@ -3694,7 +3694,7 @@ XML: - Web.Release.config - Web.config - packages.config - color: "#36BBF3" + color: "#E88E88" XPages: type: programming @@ -3740,7 +3740,7 @@ XSLT: - .xsl tm_scope: text.xml.xsl ace_mode: xml - color: "#1F87CE" + color: "#EB8CEB" Xojo: type: programming @@ -3783,7 +3783,7 @@ Yacc: - .yy tm_scope: source.bison ace_mode: text - color: "#A33890" + color: "#F011F0" Zephir: type: programming @@ -3866,7 +3866,7 @@ reStructuredText: - .rst - .rest ace_mode: text - color: "#444444" + color: "#B3BCBC" wisp: type: programming From 802de8112ccb3e9cd5696efe9dc5a2b8e6f9a3b9 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 14:39:19 +0300 Subject: [PATCH 06/68] Update some colors to fix closeness. 
--- lib/linguist/languages.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 40ac5b40..2ccff3d2 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -153,7 +153,7 @@ Ant Build System: - ant.xml - build.xml ace_mode: xml - color: "#A82C7C" + color: "#C64A9A" ApacheConf: type: markup @@ -183,7 +183,7 @@ AppleScript: interpreters: - osascript ace_mode: applescript - color: "#F2F1F1" + color: "#101F1F" Arc: type: programming @@ -291,7 +291,7 @@ Batchfile: - .cmd tm_scope: source.dosbatch ace_mode: batchfile - color: "#A3D300" + color: "#C1F12E" Befunge: type: programming @@ -631,7 +631,7 @@ Common Lisp: Component Pascal: type: programming - color: "#c1df5f" + color: "#EFFD7D" extensions: - .cp - .cps @@ -694,7 +694,7 @@ Cucumber: aliases: - gherkin ace_mode: text - color: "#00A818" + color: "#2EC636" Cuda: type: programming @@ -703,7 +703,7 @@ Cuda: - .cuh tm_scope: source.cuda-c++ ace_mode: c_cpp - color: "#1C201C" + color: "#3A4E3A" Cycript: type: programming @@ -1714,7 +1714,7 @@ LLVM: extensions: - .ll ace_mode: text - color: "#79AEE8" + color: "#97CC06" LOLCODE: type: programming @@ -1778,7 +1778,7 @@ Less: - .less tm_scope: source.css.less ace_mode: less - color: "#83BB83" + color: "#A1D9A1" Lex: type: programming @@ -1988,7 +1988,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm - color: "#80B729" + color: "#AED547" Mask: type: markup @@ -2525,7 +2525,7 @@ Parrot Internal Representation: Pascal: type: programming - color: "#b0ce4e" + color: "#DEEC6C" extensions: - .pas - .dfm @@ -2779,7 +2779,7 @@ QMake: R: type: programming - color: "#2A9DF8" + color: "#198CE7" aliases: - R - Rscript @@ -2809,7 +2809,7 @@ RDoc: extensions: - .rdoc tm_scope: text.rdoc - color: "#333333" + color: "#515151" REALbasic: type: programming @@ -3783,7 +3783,7 @@ Yacc: - .yy tm_scope: source.bison ace_mode: text - color: "#F011F0" + color: "#1E3F1E" 
Zephir: type: programming From 35884d482cc6b4813d9e6a4787a8cfd21ffea84d Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 15:38:41 +0300 Subject: [PATCH 07/68] Update some colors to fix closeness. --- lib/linguist/languages.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 2ccff3d2..49aa2c75 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -694,7 +694,7 @@ Cucumber: aliases: - gherkin ace_mode: text - color: "#2EC636" + color: "#2EF336" Cuda: type: programming @@ -1714,7 +1714,7 @@ LLVM: extensions: - .ll ace_mode: text - color: "#97CC06" + color: "#185619" LOLCODE: type: programming @@ -1988,7 +1988,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm - color: "#AED547" + color: "#DB0274" Mask: type: markup @@ -2525,7 +2525,7 @@ Parrot Internal Representation: Pascal: type: programming - color: "#DEEC6C" + color: "#0B1999" extensions: - .pas - .dfm @@ -2809,7 +2809,7 @@ RDoc: extensions: - .rdoc tm_scope: text.rdoc - color: "#515151" + color: "#8E84BF" REALbasic: type: programming @@ -3773,7 +3773,7 @@ YAML: - .yaml - .yaml-tmlanguage ace_mode: yaml - color: "#001111" + color: "#56789A" Yacc: type: programming @@ -3783,7 +3783,7 @@ Yacc: - .yy tm_scope: source.bison ace_mode: text - color: "#1E3F1E" + color: "#4B6C4B" Zephir: type: programming From ee61466042069db8a649a3e13444c3d079f1a685 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 15:58:46 +0300 Subject: [PATCH 08/68] Update some colors to fix closeness. 
--- lib/linguist/languages.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 49aa2c75..4094b18e 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -694,7 +694,7 @@ Cucumber: aliases: - gherkin ace_mode: text - color: "#2EF336" + color: "#5B2063" Cuda: type: programming @@ -1988,7 +1988,7 @@ Markdown: - .mkdown - .ron tm_scope: source.gfm - color: "#DB0274" + color: "#083FA1" Mask: type: markup @@ -2525,7 +2525,7 @@ Parrot Internal Representation: Pascal: type: programming - color: "#0B1999" + color: "#3846C6" extensions: - .pas - .dfm From cf834e8a2176d4dfaaa5e7aaf4c706c6ac756c15 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 16:23:33 +0300 Subject: [PATCH 09/68] Update some colors to fix closeness. --- lib/linguist/languages.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 4094b18e..87059633 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -631,7 +631,7 @@ Common Lisp: Component Pascal: type: programming - color: "#EFFD7D" + color: "#B0CE4E" extensions: - .cp - .cps @@ -2525,7 +2525,7 @@ Parrot Internal Representation: Pascal: type: programming - color: "#3846C6" + color: "#C1DF5F" extensions: - .pas - .dfm From 7e76d1cc6bafbf3357eaf8025b1836ac8744fd06 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 17:01:38 +0300 Subject: [PATCH 10/68] Update Pascal color to fix the closeness issue. 
--- lib/linguist/languages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 87059633..ad9ac132 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -2525,7 +2525,7 @@ Parrot Internal Representation: Pascal: type: programming - color: "#C1DF5F" + color: "#E3F171" extensions: - .pas - .dfm From fc5ae1cfbcad8efa8fce84b4592fadf6b65fbfb2 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 17:53:58 +0300 Subject: [PATCH 11/68] Revert colors of some languages. Revert colors of some languages which have 'type: data' attribute. --- lib/linguist/languages.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index ad9ac132..496fbee3 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -153,7 +153,6 @@ Ant Build System: - ant.xml - build.xml ace_mode: xml - color: "#C64A9A" ApacheConf: type: markup @@ -1210,7 +1209,6 @@ Gradle: - .gradle tm_scope: source.groovy.gradle ace_mode: text - color: "#4B0283" Grammatical Framework: type: programming @@ -2027,7 +2025,6 @@ Maven POM: filenames: - pom.xml ace_mode: xml - color: "#007915" Max: type: programming @@ -3694,7 +3691,6 @@ XML: - Web.Release.config - Web.config - packages.config - color: "#E88E88" XPages: type: programming @@ -3773,7 +3769,6 @@ YAML: - .yaml - .yaml-tmlanguage ace_mode: yaml - color: "#56789A" Yacc: type: programming From 4d2b38497dda3ad2258af363e5bf51647c004097 Mon Sep 17 00:00:00 2001 From: ismail-arilik Date: Wed, 2 Sep 2015 18:53:29 +0300 Subject: [PATCH 12/68] Remove color attributes of 'type: data's. 
--- lib/linguist/languages.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 496fbee3..3d639833 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -795,7 +795,6 @@ Dart: Diff: type: data - color: "#88dddd" extensions: - .diff - .patch @@ -3461,7 +3460,6 @@ Unified Parallel C: Unity3D Asset: type: data ace_mode: yaml - color: "#ab69a1" extensions: - .anim - .asset @@ -3818,7 +3816,6 @@ eC: edn: type: data ace_mode: clojure - color: "#db5855" extensions: - .edn tm_scope: source.clojure From 7b185cc2f3e1062ade8f377b5491bee966d362ad Mon Sep 17 00:00:00 2001 From: Dario Bertini Date: Fri, 23 Oct 2015 20:49:05 +0100 Subject: [PATCH 13/68] Switch to MagicPython for Python sources --- .gitmodules | 3 +++ grammars.yml | 3 ++- vendor/grammars/MagicPython | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) create mode 160000 vendor/grammars/MagicPython diff --git a/.gitmodules b/.gitmodules index be2d7b30..c1975e6e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -683,3 +683,6 @@ [submodule "vendor/grammars/FreeMarker.tmbundle"] path = vendor/grammars/FreeMarker.tmbundle url = https://github.com/freemarker/FreeMarker.tmbundle +[submodule "vendor/grammars/MagicPython"] + path = vendor/grammars/MagicPython + url = git@github.com:MagicStack/MagicPython.git diff --git a/grammars.yml b/grammars.yml index 4eee0d23..19f55efa 100644 --- a/grammars.yml +++ b/grammars.yml @@ -346,8 +346,9 @@ vendor/grammars/language-jsoniq/: - source.xq vendor/grammars/language-ncl: - source.ncl -vendor/grammars/language-python: +vendor/grammars/MagicPython: - source.python +vendor/grammars/language-python: - source.regexp.python - text.python.console - text.python.traceback diff --git a/vendor/grammars/MagicPython b/vendor/grammars/MagicPython new file mode 160000 index 00000000..13bf5348 --- /dev/null +++ b/vendor/grammars/MagicPython @@ -0,0 +1 @@ +Subproject commit 13bf534834036911c57d15c40242e45783aad1ba 
From b82f563c3854f6393c27a28e177f39cbdba8cf2c Mon Sep 17 00:00:00 2001 From: Siraaj Khandkar Date: Tue, 15 Dec 2015 13:33:03 -0500 Subject: [PATCH 14/68] Add Mathematica test file extension: .mt --- lib/linguist/languages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 7a33cf1f..c5eb9e6a 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -2061,6 +2061,7 @@ Mathematica: - .cdf - .m - .ma + - .mt - .nb - .nbp - .wl From 0473af368f049e7a89812fa93068a567c877cc04 Mon Sep 17 00:00:00 2001 From: Siraaj Khandkar Date: Tue, 15 Dec 2015 13:37:55 -0500 Subject: [PATCH 15/68] Add sample Mathematica test files. --- samples/Mathematica/TestArithmetic.mt | 1 + samples/Mathematica/TestString.mt | 1 + samples/Mathematica/TestSuite.mt | 5 +++++ 3 files changed, 7 insertions(+) create mode 100644 samples/Mathematica/TestArithmetic.mt create mode 100644 samples/Mathematica/TestString.mt create mode 100644 samples/Mathematica/TestSuite.mt diff --git a/samples/Mathematica/TestArithmetic.mt b/samples/Mathematica/TestArithmetic.mt new file mode 100644 index 00000000..40cbe122 --- /dev/null +++ b/samples/Mathematica/TestArithmetic.mt @@ -0,0 +1 @@ +Test[1 + 2, 3, TestID -> "One plus two"] diff --git a/samples/Mathematica/TestString.mt b/samples/Mathematica/TestString.mt new file mode 100644 index 00000000..c0d61036 --- /dev/null +++ b/samples/Mathematica/TestString.mt @@ -0,0 +1 @@ +Test[1"a" <> "b", "ab", TestID -> "Concat \"a\" and \"b\""] diff --git a/samples/Mathematica/TestSuite.mt b/samples/Mathematica/TestSuite.mt new file mode 100644 index 00000000..b18ae062 --- /dev/null +++ b/samples/Mathematica/TestSuite.mt @@ -0,0 +1,5 @@ +TestSuite[ + { "TestArithmetic.mt" + , "TestString.mt" + } +] From aa2e3000cd79c19f08b1eb62ca4dc413d857df5f Mon Sep 17 00:00:00 2001 From: Siraaj Khandkar Date: Wed, 16 Dec 2015 14:00:48 -0500 Subject: [PATCH 16/68] Remove copypasta artifact. 
--- samples/Mathematica/TestString.mt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/Mathematica/TestString.mt b/samples/Mathematica/TestString.mt index c0d61036..20539407 100644 --- a/samples/Mathematica/TestString.mt +++ b/samples/Mathematica/TestString.mt @@ -1 +1 @@ -Test[1"a" <> "b", "ab", TestID -> "Concat \"a\" and \"b\""] +Test["a" <> "b", "ab", TestID -> "Concat \"a\" and \"b\""] From 00a436f1757c8e0f13ee4de6fa390b13059a53fd Mon Sep 17 00:00:00 2001 From: Syed Humza Shah Date: Fri, 18 Dec 2015 01:24:05 +0000 Subject: [PATCH 17/68] used proper 'if/elsif/end' syntax --- lib/linguist.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/linguist.rb b/lib/linguist.rb index 3929efb9..589869c6 100644 --- a/lib/linguist.rb +++ b/lib/linguist.rb @@ -13,8 +13,8 @@ class << Linguist def instrument(*args, &bk) if instrumenter instrumenter.instrument(*args, &bk) - else - yield if block_given? + elsif block_given? + yield end end end From 97afedd86114caf1dc5f8c2e7f991a5009747107 Mon Sep 17 00:00:00 2001 From: Syed Humza Shah Date: Fri, 18 Dec 2015 01:27:58 +0000 Subject: [PATCH 18/68] removed usage of keyword as variable this was done mainly to fix annoying syntax highlighting --- lib/linguist/lazy_blob.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/linguist/lazy_blob.rb b/lib/linguist/lazy_blob.rb index 28fb78f3..5418eeae 100644 --- a/lib/linguist/lazy_blob.rb +++ b/lib/linguist/lazy_blob.rb @@ -86,8 +86,8 @@ module Linguist protected # Returns true if the attribute is present and not the string "false". - def boolean_attribute(attr) - attr != "false" + def boolean_attribute(attrbute) + attribute != "false" end def load_blob! 
From a3613dc4380ee5f3bea0bf5b1d3fb292347780a5 Mon Sep 17 00:00:00 2001 From: Syed Humza Shah Date: Fri, 18 Dec 2015 01:43:01 +0000 Subject: [PATCH 19/68] fixed a typo --- lib/linguist/lazy_blob.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/linguist/lazy_blob.rb b/lib/linguist/lazy_blob.rb index 5418eeae..e828dca6 100644 --- a/lib/linguist/lazy_blob.rb +++ b/lib/linguist/lazy_blob.rb @@ -86,7 +86,7 @@ module Linguist protected # Returns true if the attribute is present and not the string "false". - def boolean_attribute(attrbute) + def boolean_attribute(attribute) attribute != "false" end From 71dfac26fe4e6d1541f1e5390e566965d6850aed Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sun, 20 Dec 2015 13:09:00 +0100 Subject: [PATCH 20/68] .wiki extension as Mediawiki --- lib/linguist/languages.yml | 1 + samples/MediaWiki/README.wiki | 694 ++++++++++++++++++++++++++++++++++ 2 files changed, 695 insertions(+) create mode 100644 samples/MediaWiki/README.wiki diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index c5eb9e6a..c20a549f 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -2108,6 +2108,7 @@ MediaWiki: wrap: true extensions: - .mediawiki + - .wiki tm_scope: text.html.mediawiki ace_mode: text diff --git a/samples/MediaWiki/README.wiki b/samples/MediaWiki/README.wiki new file mode 100644 index 00000000..546c3290 --- /dev/null +++ b/samples/MediaWiki/README.wiki @@ -0,0 +1,694 @@ += Name = + +'''nginx_tcp_proxy_module''' - support TCP proxy with Nginx + += Installation = + +Download the latest stable version of the release tarball of this module from [http://github.com/yaoweibin/nginx_tcp_proxy_module github] + +Grab the nginx source code from [http://nginx.org/ nginx.org], for example, the version 1.2.1 (see nginx compatibility), and then build the source with this module: + + + $ wget 'http://nginx.org/download/nginx-1.2.1.tar.gz' + $ tar -xzvf nginx-1.2.1.tar.gz + $ cd nginx-1.2.1/ + $ patch 
-p1 < /path/to/nginx_tcp_proxy_module/tcp.patch + + $ ./configure --add-module=/path/to/nginx_tcp_proxy_module + + $ make + $ make install + + + += Synopsis = + + +http { + + server { + listen 80; + + location /status { + tcp_check_status; + } + } +} + + + + +#You can also include tcp_proxy.conf file individually + +#include /path/to/tcp_proxy.conf; + +tcp { + + upstream cluster { + # simple round-robin + server 192.168.0.1:80; + server 192.168.0.2:80; + + check interval=3000 rise=2 fall=5 timeout=1000; + + #check interval=3000 rise=2 fall=5 timeout=1000 type=ssl_hello; + + #check interval=3000 rise=2 fall=5 timeout=1000 type=http; + #check_http_send "GET / HTTP/1.0\r\n\r\n"; + #check_http_expect_alive http_2xx http_3xx; + } + + server { + listen 8888; + + proxy_pass cluster; + } +} + + += Description = + +This module actually include many modules: ngx_tcp_module, ngx_tcp_core_module, ngx_tcp_upstream_module, ngx_tcp_proxy_module, ngx_tcp_websocket_module, ngx_tcp_ssl_module, ngx_tcp_upstream_ip_hash_module. All these modules work together to support TCP proxy with Nginx. I also added other features: ip_hash, upstream server health check, status monitor. + +The motivation of writing these modules is Nginx's high performance and robustness. At first, I developed this module just for general TCP proxy. And now, this module is frequently used in websocket reverse proxying. + +Note, You can't use the same listening port with HTTP modules. + += Directives = + +== ngx_tcp_moodule == + +=== tcp === + +'''syntax:''' ''tcp {...}'' + +'''default:''' ''none'' + +'''context:''' ''main'' + +'''description:''' All the tcp related directives are contained in the tcp block. + + +'''ngx_tcp_core_moodule''' + +=== server === + +'''syntax:''' ''server {...}'' + +'''default:''' ''none'' + +'''context:''' ''tcp'' + +'''description:''' All the specific server directives are contained in the server block. 
+ +=== listen === + +'''syntax:''' ''listen address:port [ bind | ssl | default]'' + +'''default:''' ''none'' + +'''context:''' ''server'' + +'''description:''' The same as [http://wiki.nginx.org/NginxMailCoreModule#listen listen]. The parameter of default means the default server if you have several server blocks with the same port. + +=== access_log === + +'''syntax:''' ''access_log path [buffer=size] | off'' + +'''default:''' ''access_log logs/tcp_access.log'' + +'''context:''' ''tcp, server'' + +'''description:''' Set the access.log. Each record's format is like this: + +
+
+log_time worker_process_pid client_ip host_ip accept_time upstream_ip bytes_read bytes_write
+
+2011/08/02 06:19:07 [5972] 127.0.0.1 0.0.0.0:1982 2011/08/02 06:18:19 172.19.0.129:80 80 236305
+
+
+ +* ''log_time'': The current time when writing this log. The log action is called when the proxy session is closed. +* ''worker_process_pid'': the pid of worker process +* ''client_ip'': the client ip +* ''host_ip'': the server ip and port +* ''accept_time'': the time when the server accepts client's connection +* ''upstream_ip'': the upstream server's ip +* ''bytes_read'': the bytes read from client +* ''bytes_write'': the bytes written to client + +=== allow === + +'''syntax:''' ''allow [ address | CIDR | all ]'' + +'''default:''' ''none'' + +'''context:''' ''server'' + +'''description:''' Directive grants access for the network or addresses indicated. + +=== deny === + +'''syntax:''' ''deny [ address | CIDR | all ]'' + +'''default:''' ''none'' + +'''context:''' ''server'' + +'''description:''' Directive grants access for the network or addresses indicated. + +=== so_keepalive === + +'''syntax:''' ''so_keepalive on|off'' + +'''default:''' ''off'' + +'''context:''' ''main, server'' + +'''description:''' The same as [http://wiki.nginx.org/NginxMailCoreModule#so_keepalive so_keepalive]. + +=== tcp_nodelay === + +'''syntax:''' ''tcp_nodelay on|off'' + +'''default:''' ''on'' + +'''context:''' ''main, server'' + +'''description:''' The same as [http://wiki.nginx.org/NginxHttpCoreModule#tcp_nodelay tcp_nodelay]. + +=== timeout === + +'''syntax:''' ''timeout milliseconds'' + +'''default:''' ''60000'' + +'''context:''' ''main, server'' + +'''description:''' set the timeout value with clients. + +=== server_name === + +'''syntax:''' ''server_name name'' + +'''default:''' ''The name of the host, obtained through gethostname()'' + +'''context:''' ''tcp, server'' + +'''description:''' The same as [http://wiki.nginx.org/NginxMailCoreModule#server_name server_name]. You can specify several server name in different server block with the same port. They can be used in websocket module. 
+ +=== resolver === + +'''syntax:''' ''resolver address'' + +'''default:''' ''none'' + +'''context:''' ''tcp, server'' + +'''description:''' DNS server + +=== resolver_timeout === + +'''syntax:''' ''resolver_timeout time'' + +'''default:''' ''30s'' + +'''context:''' ''tcp, server'' + +'''description:''' Resolver timeout in seconds. + + +== ngx_tcp_upstream_module == + +=== upstream === + +'''syntax:''' ''upstream {...}'' + +'''default:''' ''none'' + +'''context:''' ''tcp'' + +'''description:''' All the upstream directives are contained in this block. The upstream server will be dispatched with round robin by default. + +=== server === + +'''syntax:''' ''server name [parameters]'' + +'''default:''' ''none'' + +'''context:''' ''upstream'' + +'''description:''' Most of the parameters are the same as [http://wiki.nginx.org/NginxHttpUpstreamModule#server server]. Default port is 80. + +=== check === + +'''syntax:''' ''check interval=milliseconds [fall=count] [rise=count] [timeout=milliseconds] [type=tcp|ssl_hello|smtp|mysql|pop3|imap]'' + +'''default:''' ''none, if parameters omitted, default parameters are interval=30000 fall=5 rise=2 timeout=1000'' + +'''context:''' ''upstream'' + +'''description:''' Add the health check for the upstream servers. At present, the check method is a simple tcp connect. + +The parameters' meanings are: + +* ''interval'': the check request's interval time. +* ''fall''(fall_count): After fall_count check failures, the server is marked down. +* ''rise''(rise_count): After rise_count check success, the server is marked up. +* ''timeout'': the check request's timeout. +* ''type'': the check protocol type: +# ''tcp'' is a simple tcp socket connect and peek one byte. +# ''ssl_hello'' sends a client ssl hello packet and receives the server ssl hello packet. +# ''http'' sends a http request packet, receives and parses the http response to diagnose if the upstream server is alive. 
+# ''smtp'' sends a smtp request packet, receives and parses the smtp response to diagnose if the upstream server is alive. A response beginning with '2' should be an OK response. +# ''mysql'' connects to the mysql server, receives the greeting response to diagnose if the upstream server is alive. +# ''pop3'' receives and parses the pop3 response to diagnose if the upstream server is alive. A response beginning with '+' should be an OK response. +# ''imap'' connects to the imap server, receives the greeting response to diagnose if the upstream server is alive. + +=== check_http_send === + +'''syntax:''' ''check_http_send http_packet'' + +'''default:''' ''"GET / HTTP/1.0\r\n\r\n"'' + +'''context:''' ''upstream'' + +'''description:''' If you set the check type to http, then the check function will send this http packet to check the upstream server. + +=== check_http_expect_alive === + +'''syntax:''' ''check_http_expect_alive [ http_2xx | http_3xx | http_4xx | http_5xx ]'' + +'''default:''' ''http_2xx | http_3xx'' + +'''context:''' ''upstream'' + +'''description:''' These status codes indicate the upstream server's http response is OK, the backend is alive. + +=== check_smtp_send === + +'''syntax:''' ''check_smtp_send smtp_packet'' + +'''default:''' ''"HELO smtp.localdomain\r\n"'' + +'''context:''' ''upstream'' + +'''description:''' If you set the check type to smtp, then the check function will send this smtp packet to check the upstream server. + +=== check_smtp_expect_alive === + +'''syntax:''' ''check_smtp_expect_alive [smtp_2xx | smtp_3xx | smtp_4xx | smtp_5xx]'' + +'''default:''' ''smtp_2xx'' + +'''context:''' ''upstream'' + +'''description:''' These status codes indicate the upstream server's smtp response is OK, the backend is alive. 
+ +=== check_shm_size === + +'''syntax:''' ''check_shm_size size'' + +'''default:''' ''(number_of_checked_upstream_blocks + 1) * pagesize'' + +'''context:''' ''tcp'' + +'''description:''' If you store hundreds of servers in one upstream block, the shared memory for health check may not be enough; you can enlarge it with this directive. + +=== tcp_check_status === + +'''syntax:''' ''tcp_check_status'' + +'''default:''' ''none'' + +'''context:''' ''location'' + +'''description:''' Display the health checking servers' status by HTTP. This directive is set in the http block. + +The table field meanings are: + +* ''Index'': The server index in the check table +* ''Name'' : The upstream server name +* ''Status'': The marked status of the server. +* ''Busyness'': The number of connections which are connecting to the server. +* ''Rise counts'': Count the successful checking +* ''Fall counts'': Count the unsuccessful checking +* ''Access counts'': Count the times accessing this server +* ''Check type'': The type of the check packet + + +'''ngx_tcp_upstream_busyness_module''' + +=== busyness === + +'''syntax:''' ''busyness'' + +'''default:''' ''none'' + +'''context:''' ''upstream'' + +'''description:''' the upstream server will be dispatched by backend servers' busyness. + + +'''ngx_tcp_upstream_ip_hash_module''' + +=== ip_hash === + +'''syntax:''' ''ip_hash'' + +'''default:''' ''none'' + +'''context:''' ''upstream'' + +'''description:''' the upstream server will be dispatched by ip_hash. + + +== ngx_tcp_proxy_module == + +=== proxy_pass === + +'''syntax:''' ''proxy_pass host:port'' + +'''default:''' ''none'' + +'''context:''' ''server'' + +'''description:''' proxy the request to the backend server. Default port is 80. + +=== proxy_buffer === + +'''syntax:''' ''proxy_buffer size'' + +'''default:''' ''4k'' + +'''context:''' ''tcp, server'' + +'''description:''' set the size of proxy buffer. 
+ +=== proxy_connect_timeout === + +'''syntax:''' ''proxy_connect_timeout milliseconds'' + +'''default:''' ''60000'' + +'''context:''' ''tcp, server'' + +'''description:''' set the timeout value of connection to backends. + +=== proxy_read_timeout === + +'''syntax:''' ''proxy_read_timeout milliseconds'' + +'''default:''' ''60000'' + +'''context:''' ''tcp, server'' + +'''description:''' set the timeout value of reading from backends. + +=== proxy_send_timeout === + +'''syntax:''' ''proxy_send_timeout milliseconds'' + +'''default:''' ''60000'' + +'''context:''' ''tcp, server'' + +'''description:''' set the timeout value of sending to backends. + + +== ngx_tcp_websocket_module == + +=== websocket_pass === + +'''syntax:''' ''websocket_pass [path] host:port'' + +'''default:''' ''none'' + +'''context:''' ''server'' + +'''description:''' proxy the websocket request to the backend server. Default port is 80. You can specify several different paths in the same server block. + +=== websocket_buffer === + +'''syntax:''' ''websocket_buffer size'' + +'''default:''' ''4k'' + +'''context:''' ''tcp, server'' + +'''description:''' set the size of proxy buffer. + +=== websocket_connect_timeout === + +'''syntax:''' ''websocket_connect_timeout milliseconds'' + +'''default:''' ''60000'' + +'''context:''' ''tcp, server'' + +'''description:''' set the timeout value of connection to backends. + +=== websocket_read_timeout === + +'''syntax:''' ''websocket_read_timeout milliseconds'' + +'''default:''' ''60000'' + +'''context:''' ''tcp, server'' + +'''description:''' set the timeout value of reading from backends. Your timeout will be the minimum of this and the *timeout* parameter, so if you want a long timeout for your websockets, make sure to set both parameters. + +=== websocket_send_timeout === + +'''syntax:''' ''websocket_send_timeout milliseconds'' + +'''default:''' ''60000'' + +'''context:''' ''tcp, server'' + +'''description:''' set the timeout value of sending to backends. 
+ + +== ngx_tcp_ssl_module == + +The default config file includes this ngx_tcp_ssl_module. If you want to just compile nginx without ngx_tcp_ssl_module, copy the ngx_tcp_proxy_module/config_without_ssl to ngx_tcp_proxy_module/config, reconfigrure and compile nginx. + +=== ssl === + +'''syntax:''' ''ssl [on|off] '' + +'''default:''' ''ssl off'' + +'''context:''' ''tcp, server'' + +Enables SSL for a server. + +=== ssl_certificate === + +'''syntax:''' ''ssl_certificate file'' + +'''default:''' ''ssl_certificate cert.pem'' + +'''context:''' ''tcp, server'' + +This directive specifies the file containing the certificate, in PEM format. This file can contain also other certificates and the server private key. + +=== ssl_certificate_key === + +'''syntax:''' ''ssl_certificate_key file'' + +'''default:''' ''ssl_certificate_key cert.pem'' + +'''context:''' ''tcp, server'' + +This directive specifies the file containing the private key, in PEM format. + +=== ssl_client_certificate === + +'''syntax:''' ''ssl_client_certificate file'' + +'''default:''' ''none'' + +'''context:''' ''tcp, server'' + +This directive specifies the file containing the CA (root) certificate, in PEM format, that is used for validating client certificates. + +=== ssl_dhparam === + +'''syntax:''' ''ssl_dhparam file'' + +'''default:''' ''none'' + +'''context:''' ''tcp, server'' + +This directive specifies a file containing Diffie-Hellman key agreement protocol cryptographic parameters, in PEM format, utilized for exchanging session keys between server and client. + +=== ssl_ciphers === + +'''syntax:''' ''ssl_ciphers openssl_cipherlist_spec'' + +'''default:''' ''ssl_ciphers HIGH:!aNULL:!MD5'' + +'''context:''' ''tcp, server'' + +This directive describes the list of cipher suites the server supports for establishing a secure connection. 
Cipher suites are specified in the [http://openssl.org/docs/apps/ciphers.html OpenSSL] cipherlist format, for example: + + +ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP; + + +The complete cipherlist supported by the currently installed version of OpenSSL in your platform can be obtained by issuing the command: +
+openssl ciphers
+
+ +=== ssl_crl === + +'''syntax:''' ''ssl_crl file'' + +'''default:''' ''none'' + +'''context:''' ''tcp, server'' + +This directive specifies the filename of a Certificate Revocation List, in PEM format, which is used to check the revocation status of certificates. + +=== ssl_prefer_server_ciphers === + +'''syntax:''' ''ssl_prefer_server_ciphers [on|off] '' + +'''default:''' ''ssl_prefer_server_ciphers off'' + +'''context:''' ''tcp, server'' + +The server requires that the cipher suite list for protocols SSLv3 and TLSv1 are to be preferred over the client supported cipher suite list. + +=== ssl_protocols === + +'''syntax:''' ''ssl_protocols [SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2]'' + +'''default:''' ''ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2'' + +'''context:''' ''tcp, server'' + +This directive enables the protocol versions specified. + +=== ssl_verify_client === + +'''syntax:''' ''ssl_verify_client on|off|optional'' + +'''default:''' ''ssl_verify_client off'' + +'''context:''' ''tcp, server'' + +This directive enables the verification of the client identity. Parameter 'optional' checks the client identity using its certificate in case it was made available to the server. + +=== ssl_verify_depth === + +'''syntax:''' ''ssl_verify_depth number'' + +'''default:''' ''ssl_verify_depth 1'' + +'''context:''' ''tcp, server'' + +This directive sets how deep the server should go in the client provided certificate chain in order to verify the client identity. + +=== ssl_session_cache === + +'''syntax:''' ''ssl_session_cache off|none|builtin:size and/or shared:name:size'' + +'''default:''' ''ssl_session_cache off'' + +'''context:''' ''tcp, server'' + +The directive sets the types and sizes of caches to store the SSL sessions. + +The cache types are: + +* off -- Hard off: nginx says explicitly to a client that sessions can not reused. +* none -- Soft off: nginx says to a client that session can be resued, but nginx actually never reuses them. 
This is workaround for some mail clients as ssl_session_cache may be used in mail proxy as well as in HTTP server. +* builtin -- the OpenSSL builtin cache, is used inside one worker process only. The cache size is assigned in the number of the sessions. Note: there appears to be a memory fragmentation issue using this method, please take that into consideration when using this. See "References" below. +* shared -- the cache is shared between all worker processes. The size of the cache is assigned in bytes: 1 MB cache can contain roughly 4000 sessions. Each shared cache must be given an arbitrary name. A shared cache with a given name can be used in several virtual hosts. +It's possible to use both types of cache — builtin and shared — simultaneously, for example: + + +ssl_session_cache builtin:1000 shared:SSL:10m; + + +Bear in mind however, that using only shared cache, i.e., without builtin, should be more effective. + +=== ssl_session_timeout === + +'''syntax:''' ''ssl_session_timeout time'' + +'''default:''' ''ssl_session_timeout 5m'' + +'''context:''' ''tcp, server'' + +This directive defines the maximum time during which the client can re-use the previously negotiated cryptographic parameters of the secure session that is stored in the SSL cache. + += Compatibility = + +* My test bed is 0.7.65+ + += Notes = + +The http_response_parse.rl and smtp_response_parse.rl are [http://www.complang.org/ragel/ ragel] scripts , you can edit the script and compile it like this: + + + $ ragel -G2 http_response_parse.rl + $ ragel -G2 smtp_response_parse.rl + + += TODO = + +* refact this module, make it more extendable for adding third-party modules +* manipulate header like http module's proxy_set_header +* built-in variable support +* custom log format +* syslog support +* FTP/IRC proxying + += Known Issues = + +* This module can't use the same listening port with the HTTP module. 
+ += Changelogs = + +== v0.2.0 == + +* add ssl proxy module +* add websocket proxy module +* add upstream busyness module +* add tcp access log module + +== v0.19 == + +* add many check methods + +== v0.1 == + +* first release + += Authors = + +Weibin Yao(姚伟斌) ''yaoweibin at gmail dot com'' + += Copyright & License = + +This README template copy from [http://github.com/agentzh agentzh]. + +I borrowed a lot of code from upstream and mail module from the nginx 0.7.* core. This part of code is copyrighted by Igor Sysoev. And the health check part is borrowed the design of Jack Lindamood's healthcheck module [http://github.com/cep21/healthcheck_nginx_upstreams healthcheck_nginx_upstreams]; + +This module is licensed under the BSD license. + +Copyright (C) 2013 by Weibin Yao . + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. From 189b2d684b5ea6d3663697846be5013552e07a98 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sun, 20 Dec 2015 16:21:11 +0100 Subject: [PATCH 21/68] .rst.txt and .rest.txt as reStructuredText extensions --- HACKING.rst.txt | 329 +++++++++++++++++++++++++++++++++++++ lib/linguist/languages.yml | 2 + 2 files changed, 331 insertions(+) create mode 100644 HACKING.rst.txt diff --git a/HACKING.rst.txt b/HACKING.rst.txt new file mode 100644 index 00000000..6e078204 --- /dev/null +++ b/HACKING.rst.txt @@ -0,0 +1,329 @@ +Contributing to SciPy +===================== + +This document aims to give an overview of how to contribute to SciPy. It +tries to answer commonly asked questions, and provide some insight into how the +community process works in practice. Readers who are familiar with the SciPy +community and are experienced Python coders may want to jump straight to the +`git workflow`_ documentation. + + +Contributing new code +--------------------- + +If you have been working with the scientific Python toolstack for a while, you +probably have some code lying around of which you think "this could be useful +for others too". Perhaps it's a good idea then to contribute it to SciPy or +another open source project. The first question to ask is then, where does +this code belong? 
That question is hard to answer here, so we start with a +more specific one: *what code is suitable for putting into SciPy?* +Almost all of the new code added to scipy has in common that it's potentially +useful in multiple scientific domains and it fits in the scope of existing +scipy submodules. In principle new submodules can be added too, but this is +far less common. For code that is specific to a single application, there may +be an existing project that can use the code. Some scikits (`scikit-learn`_, +`scikits-image`_, `statsmodels`_, etc.) are good examples here; they have a +narrower focus and because of that more domain-specific code than SciPy. + +Now if you have code that you would like to see included in SciPy, how do you +go about it? After checking that your code can be distributed in SciPy under a +compatible license (see FAQ for details), the first step is to discuss on the +scipy-dev mailing list. All new features, as well as changes to existing code, +are discussed and decided on there. You can, and probably should, already +start this discussion before your code is finished. + +Assuming the outcome of the discussion on the mailing list is positive and you +have a function or piece of code that does what you need it to do, what next? +Before code is added to SciPy, it at least has to have good documentation, unit +tests and correct code style. + +1. Unit tests + In principle you should aim to create unit tests that exercise all the code + that you are adding. This gives some degree of confidence that your code + runs correctly, also on Python versions and hardware or OSes that you don't + have available yourself. An extensive description of how to write unit + tests is given in the NumPy `testing guidelines`_. + +2. Documentation + Clear and complete documentation is essential in order for users to be able + to find and understand the code. 
Documentation for individual functions + and classes -- which includes at least a basic description, type and + meaning of all parameters and returns values, and usage examples in + `doctest`_ format -- is put in docstrings. Those docstrings can be read + within the interpreter, and are compiled into a reference guide in html and + pdf format. Higher-level documentation for key (areas of) functionality is + provided in tutorial format and/or in module docstrings. A guide on how to + write documentation is given in `how to document`_. + +3. Code style + Uniformity of style in which code is written is important to others trying + to understand the code. SciPy follows the standard Python guidelines for + code style, `PEP8`_. In order to check that your code conforms to PEP8, + you can use the `pep8 package`_ style checker. Most IDEs and text editors + have settings that can help you follow PEP8, for example by translating + tabs by four spaces. Using `pyflakes`_ to check your code is also a good + idea. + +At the end of this document a checklist is given that may help to check if your +code fulfills all requirements for inclusion in SciPy. + +Another question you may have is: *where exactly do I put my code*? To answer +this, it is useful to understand how the SciPy public API (application +programming interface) is defined. For most modules the API is two levels +deep, which means your new function should appear as +``scipy.submodule.my_new_func``. ``my_new_func`` can be put in an existing or +new file under ``/scipy//``, its name is added to the ``__all__`` +dict in that file (which lists all public functions in the file), and those +public functions are then imported in ``/scipy//__init__.py``. Any +private functions/classes should have a leading underscore (``_``) in their +name. A more detailed description of what the public API of SciPy is, is given +in `SciPy API`_. 
+ +Once you think your code is ready for inclusion in SciPy, you can send a pull +request (PR) on Github. We won't go into the details of how to work with git +here, this is described well in the `git workflow`_ section of the NumPy +documentation and in the Github help pages. When you send the PR for a new +feature, be sure to also mention this on the scipy-dev mailing list. This can +prompt interested people to help review your PR. Assuming that you already got +positive feedback before on the general idea of your code/feature, the purpose +of the code review is to ensure that the code is correct, efficient and meets +the requirements outlined above. In many cases the code review happens +relatively quickly, but it's possible that it stalls. If you have addressed +all feedback already given, it's perfectly fine to ask on the mailing list +again for review (after a reasonable amount of time, say a couple of weeks, has +passed). Once the review is completed, the PR is merged into the "master" +branch of SciPy. + +The above describes the requirements and process for adding code to SciPy. It +doesn't yet answer the question though how decisions are made exactly. The +basic answer is: decisions are made by consensus, by everyone who chooses to +participate in the discussion on the mailing list. This includes developers, +other users and yourself. Aiming for consensus in the discussion is important +-- SciPy is a project by and for the scientific Python community. In those +rare cases that agreement cannot be reached, the `maintainers`_ of the module +in question can decide the issue. + + +Contributing by helping maintain existing code +---------------------------------------------- + +The previous section talked specifically about adding new functionality to +SciPy. A large part of that discussion also applies to maintenance of existing +code. 
Maintenance means fixing bugs, improving code quality or style, +documenting existing functionality better, adding missing unit tests, keeping +build scripts up-to-date, etc. The SciPy `Trac`_ bug tracker contains all +reported bugs, build/documentation issues, etc. Fixing issues described in +Trac tickets helps improve the overall quality of SciPy, and is also a good way +of getting familiar with the project. You may also want to fix a bug because +you ran into it and need the function in question to work correctly. + +The discussion on code style and unit testing above applies equally to bug +fixes. It is usually best to start by writing a unit test that shows the +problem, i.e. it should pass but doesn't. Once you have that, you can fix the +code so that the test does pass. That should be enough to send a PR for this +issue. Unlike when adding new code, discussing this on the mailing list may +not be necessary - if the old behavior of the code is clearly incorrect, no one +will object to having it fixed. It may be necessary to add some warning or +deprecation message for the changed behavior. This should be part of the +review process. + + +Other ways to contribute +------------------------ + +There are many ways to contribute other than contributing code. Participating +in discussions on the scipy-user and scipy-dev *mailing lists* is a contribution +in itself. The `scipy.org`_ *website* contains a lot of information on the +SciPy community and can always use a new pair of hands. A redesign of this +website is ongoing, see `scipy.github.com`_. The redesigned website is a +static site based on Sphinx, the sources for it are +also on Github at `scipy.org-new`_. + +The SciPy *documentation* is constantly being improved by many developers and +users. You can contribute by sending a PR on Github that improves the +documentation, but there's also a `documentation wiki`_ that is very convenient +for making edits to docstrings (and doesn't require git knowledge). 
Anyone can +register a username on that wiki, ask on the scipy-dev mailing list for edit +rights and make edits. The documentation there is updated every day with the +latest changes in the SciPy master branch, and wiki edits are regularly +reviewed and merged into master. Another advantage of the documentation wiki +is that you can immediately see how the reStructuredText (reST) of docstrings +and other docs is rendered as html, so you can easily catch formatting errors. + +Code that doesn't belong in SciPy itself or in another package but helps users +accomplish a certain task is valuable. `SciPy Central`_ is the place to share +this type of code (snippets, examples, plotting code, etc.). + + +Useful links, FAQ, checklist +---------------------------- + +Checklist before submitting a PR +```````````````````````````````` + + - Are there unit tests with good code coverage? + - Do all public function have docstrings including examples? + - Is the code style correct (PEP8, pyflakes) + - Is the new functionality tagged with ``.. versionadded:: X.Y.Z`` (with + X.Y.Z the version number of the next release - can be found in setup.py)? + - Is the new functionality mentioned in the release notes of the next + release? + - Is the new functionality added to the reference guide? + - In case of larger additions, is there a tutorial or more extensive + module-level description? + - In case compiled code is added, is it integrated correctly via setup.py + (and preferably also Bento/Numscons configuration files)? + - If you are a first-time contributor, did you add yourself to THANKS.txt? + Please note that this is perfectly normal and desirable - the aim is to + give every single contributor credit, and if you don't add yourself it's + simply extra work for the reviewer (or worse, the reviewer may forget). + - Did you check that the code can be distributed under a BSD license? 
+ + +Useful SciPy documents +`````````````````````` + + - The `how to document`_ guidelines + - NumPy/SciPy `testing guidelines`_ + - `SciPy API`_ + - SciPy `maintainers`_ + - NumPy/SciPy `git workflow`_ + + +FAQ +``` + +*I based my code on existing Matlab/R/... code I found online, is this OK?* + +It depends. SciPy is distributed under a BSD license, so if the code that you +based your code on is also BSD licensed or has a BSD-compatible license (MIT, +Apache, ...) then it's OK. Code which is GPL-licensed, has no clear license, +requires citation or is free for academic use only can't be included in SciPy. +Therefore if you copied existing code with such a license or made a direct +translation to Python of it, your code can't be included. See also `license +compatibility`_. + + +*How do I set up SciPy so I can edit files, run the tests and make commits?* + +The simplest method is setting up an in-place build. To create your local git +repo and do the in-place build:: + + $ git clone https://github.com/scipy/scipy.git scipy + $ cd scipy + $ python setup.py build_ext -i + +Then you need to either set up a symlink in your site-packages or add this +directory to your PYTHONPATH environment variable, so Python can find it. Some +IDEs (Spyder for example) have utilities to manage PYTHONPATH. On Linux and OS +X, you can for example edit your .bash_login file to automatically add this dir +on startup of your terminal. Add the line:: + + export PYTHONPATH="$HOME/scipy:${PYTHONPATH}" + +Alternatively, to set up the symlink, use (prefix only necessary if you want to +use your local instead of global site-packages dir):: + + $ python setupegg.py develop --prefix=${HOME} + +To test that everything works, start the interpreter (not inside the scipy/ +source dir) and run the tests:: + + $ python + >>> import scipy as sp + >>> sp.test() + +Now editing a Python source file in SciPy allows you to immediately test and +use your changes, by simply restarting the interpreter. 
+ +Note that while the above procedure is the most straightforward way to get +started, you may want to look into using Bento or numscons for faster and more +flexible building, or virtualenv to maintain development environments for +multiple Python versions. + + +*How do I set up a development version of SciPy in parallel to a released +version that I use to do my job/research?* + +One simple way to achieve this is to install the released version in +site-packages, by using a binary installer or pip for example, and set up the +development version with an in-place build in a virtualenv. First install +`virtualenv`_ and `virtualenvwrapper`_, then create your virtualenv (named +scipy-dev here) with:: + + $ mkvirtualenv scipy-dev + +Now, whenever you want to switch to the virtual environment, you can use the +command ``workon scipy-dev``, while the command ``deactivate`` exits from the +virtual environment and brings back your previous shell. With scipy-dev +activated, follow the in-place build with the symlink install above to actually +install your development version of SciPy. + + +*Can I use a programming language other than Python to speed up my code?* + +Yes. The languages used in SciPy are Python, Cython, C, C++ and Fortran. All +of these have their pros and cons. If Python really doesn't offer enough +performance, one of those languages can be used. Important concerns when +using compiled languages are maintainability and portability. For +maintainability, Cython is clearly preferred over C/C++/Fortran. Cython and C +are more portable than C++/Fortran. A lot of the existing C and Fortran code +in SciPy is older, battle-tested code that was only wrapped in (but not +specifically written for) Python/SciPy. Therefore the basic advice is: use +Cython. If there's specific reasons why C/C++/Fortran should be preferred, +please discuss those reasons first. 
+ + +*There's overlap between Trac and Github, which do I use for what?* + +Trac_ is the bug tracker, Github_ the code repository. Before the SciPy code +repository moved to Github, the preferred way to contribute code was to create +a patch and attach it to a Trac ticket. The overhead of this approach is much +larger than sending a PR on Github, so please don't do this anymore. Use Trac +for bug reports, Github for patches. + + +.. _scikit-learn: http://scikit-learn.org + +.. _scikits-image: http://scikits-image.org/ + +.. _statsmodels: http://statsmodels.sourceforge.net/ + +.. _testing guidelines: https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt + +.. _how to document: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt + +.. _PEP8: http://www.python.org/dev/peps/pep-0008/ + +.. _pep8 package: http://pypi.python.org/pypi/pep8 + +.. _pyflakes: http://pypi.python.org/pypi/pyflakes + +.. _SciPy API: http://docs.scipy.org/doc/scipy/reference/api.html + +.. _git workflow: http://docs.scipy.org/doc/numpy/dev/gitwash/index.html + +.. _maintainers: https://github.com/scipy/scipy/blob/master/doc/MAINTAINERS.rst.txt + +.. _Trac: http://projects.scipy.org/scipy/timeline + +.. _Github: https://github.com/scipy/scipy + +.. _scipy.org: http://scipy.org/ + +.. _scipy.github.com: http://scipy.github.com/ + +.. _scipy.org-new: https://github.com/scipy/scipy.org-new + +.. _documentation wiki: http://docs.scipy.org/scipy/Front%20Page/ + +.. _SciPy Central: http://scipy-central.org/ + +.. _license compatibility: http://www.scipy.org/License_Compatibility + +.. _doctest: http://www.doughellmann.com/PyMOTW/doctest/ + +.. _virtualenv: http://www.virtualenv.org/ + +.. 
_virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/ + diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index c20a549f..4ca2fa33 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3967,6 +3967,8 @@ reStructuredText: extensions: - .rst - .rest + - .rest.txt + - .rst.txt ace_mode: text wisp: From 9a070d7bb3c7f1fa1a5958ed42cf5e30606db652 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Wed, 23 Dec 2015 21:43:55 +0100 Subject: [PATCH 22/68] Reorder Perl heuristic rules Order heuristic rules by accuracy Heuristic rules which are expected to generated less false positives should be tested first --- lib/linguist/heuristics.rb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/linguist/heuristics.rb b/lib/linguist/heuristics.rb index 7c1c7db1..01be4e33 100644 --- a/lib/linguist/heuristics.rb +++ b/lib/linguist/heuristics.rb @@ -276,20 +276,20 @@ module Linguist end disambiguate ".pl" do |data| - if /^(use v6|(my )?class|module)/.match(data) - Language["Perl6"] + if /^[^#]+:-/.match(data) + Language["Prolog"] elsif /use strict|use\s+v?5\./.match(data) Language["Perl"] - elsif /^[^#]+:-/.match(data) - Language["Prolog"] + elsif /^(use v6|(my )?class|module)/.match(data) + Language["Perl6"] end end disambiguate ".pm", ".t" do |data| - if /^(use v6|(my )?class|module)/.match(data) - Language["Perl6"] - elsif /use strict|use\s+v?5\./.match(data) + if /use strict|use\s+v?5\./.match(data) Language["Perl"] + elsif /^(use v6|(my )?class|module)/.match(data) + Language["Perl6"] end end From 7a709310661cfa481adc9ada7fa47551084230f6 Mon Sep 17 00:00:00 2001 From: jrnold Date: Thu, 24 Dec 2015 15:55:00 -0800 Subject: [PATCH 23/68] add Stan to languages --- lib/linguist/languages.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index c5eb9e6a..d352be07 100644 --- a/lib/linguist/languages.yml +++ 
b/lib/linguist/languages.yml @@ -3355,6 +3355,12 @@ Squirrel: tm_scope: source.c++ ace_mode: c_cpp +Stan: + type: programming + extensions: + - .stan + ace_mode: text + Standard ML: type: programming color: "#dc566d" From 5329b967937fa05197caf95cb68caa2892b8ba3e Mon Sep 17 00:00:00 2001 From: jrnold Date: Thu, 24 Dec 2015 15:55:14 -0800 Subject: [PATCH 24/68] add Stan samples --- samples/Stan/congress.stan | 14 ++++++++++++++ samples/Stan/dogs.stan | 31 +++++++++++++++++++++++++++++++ samples/Stan/schools.stan | 26 ++++++++++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 samples/Stan/congress.stan create mode 100644 samples/Stan/dogs.stan create mode 100644 samples/Stan/schools.stan diff --git a/samples/Stan/congress.stan b/samples/Stan/congress.stan new file mode 100644 index 00000000..6e7408e5 --- /dev/null +++ b/samples/Stan/congress.stan @@ -0,0 +1,14 @@ +data { + int N; + vector[N] incumbency_88; + vector[N] vote_86; + vector[N] vote_88; +} +parameters { + vector[3] beta; + real sigma; +} +model { + vote_88 ~ normal(beta[1] + beta[2] * vote_86 + + beta[3] * incumbency_88,sigma); +} diff --git a/samples/Stan/dogs.stan b/samples/Stan/dogs.stan new file mode 100644 index 00000000..ebca16d7 --- /dev/null +++ b/samples/Stan/dogs.stan @@ -0,0 +1,31 @@ +data { + int n_dogs; + int n_trials; + int y[n_dogs,n_trials]; +} +parameters { + vector[3] beta; +} +transformed parameters { + matrix[n_dogs,n_trials] n_avoid; + matrix[n_dogs,n_trials] n_shock; + matrix[n_dogs,n_trials] p; + + for (j in 1:n_dogs) { + n_avoid[j,1] <- 0; + n_shock[j,1] <- 0; + for (t in 2:n_trials) { + n_avoid[j,t] <- n_avoid[j,t-1] + 1 - y[j,t-1]; + n_shock[j,t] <- n_shock[j,t-1] + y[j,t-1]; + } + for (t in 1:n_trials) + p[j,t] <- beta[1] + beta[2] * n_avoid[j,t] + beta[3] * n_shock[j,t]; + } +} +model { + beta ~ normal(0, 100); + for (i in 1:n_dogs) { + for (j in 1:n_trials) + y[i,j] ~ bernoulli_logit(p[i,j]); + } +} diff --git a/samples/Stan/schools.stan 
b/samples/Stan/schools.stan new file mode 100644 index 00000000..171864a1 --- /dev/null +++ b/samples/Stan/schools.stan @@ -0,0 +1,26 @@ +data { + int N; + vector[N] y; + vector[N] sigma_y; +} +parameters { + vector[N] eta; + real mu_theta; + real sigma_eta; + real xi; +} +transformed parameters { + real sigma_theta; + vector[N] theta; + + theta <- mu_theta + xi * eta; + sigma_theta <- fabs(xi) / sigma_eta; +} +model { + mu_theta ~ normal(0, 100); + sigma_eta ~ inv_gamma(1, 1); //prior distribution can be changed to uniform + + eta ~ normal(0, sigma_eta); + xi ~ normal(0, 5); + y ~ normal(theta,sigma_y); +} From f141abbc730bfbf25f18dae64e69057d47729b43 Mon Sep 17 00:00:00 2001 From: jrnold Date: Thu, 24 Dec 2015 16:05:06 -0800 Subject: [PATCH 25/68] add tm_scope for Stan --- lib/linguist/languages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index d352be07..94f9f330 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3360,6 +3360,7 @@ Stan: extensions: - .stan ace_mode: text + tm_scope: none Standard ML: type: programming From 8144438f398ed15f90b5634cc4788d8b28d9fadb Mon Sep 17 00:00:00 2001 From: jrnold Date: Sun, 27 Dec 2015 18:50:43 -0800 Subject: [PATCH 26/68] Add color for Stan Use the brick-red color of the [Stan logo](https://raw.githubusercontent.com/stan-dev/logos/master/logo.png). 
--- lib/linguist/languages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 94f9f330..319c445e 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3357,6 +3357,7 @@ Squirrel: Stan: type: programming + color: "#b2011d" extensions: - .stan ace_mode: text From 1ac16cbec7b41bbdf76131352a4eec9bb29d2079 Mon Sep 17 00:00:00 2001 From: jrnold Date: Mon, 28 Dec 2015 22:31:49 -0800 Subject: [PATCH 27/68] add vendor for Stan --- .gitmodules | 3 +++ grammars.yml | 2 ++ vendor/grammars/atom-language-stan | 1 + 3 files changed, 6 insertions(+) create mode 160000 vendor/grammars/atom-language-stan diff --git a/.gitmodules b/.gitmodules index 8234378a..cde3e37b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -701,3 +701,6 @@ [submodule "vendor/grammars/language-renpy"] path = vendor/grammars/language-renpy url = https://github.com/williamd1k0/language-renpy.git +[submodule "vendor/grammars/atom-language-stan"] + path = vendor/grammars/atom-language-stan + url = git@github.com:jrnold/atom-language-stan.git diff --git a/grammars.yml b/grammars.yml index 8f7b1173..0319774a 100644 --- a/grammars.yml +++ b/grammars.yml @@ -187,6 +187,8 @@ vendor/grammars/atom-fsharp/: - source.fsharp.fsx vendor/grammars/atom-language-purescript/: - source.purescript +vendor/grammars/atom-language-stan/: +- source.stan vendor/grammars/atom-salt: - source.python.salt - source.yaml.salt diff --git a/vendor/grammars/atom-language-stan b/vendor/grammars/atom-language-stan new file mode 160000 index 00000000..72c626ae --- /dev/null +++ b/vendor/grammars/atom-language-stan @@ -0,0 +1 @@ +Subproject commit 72c626ae96792809447ed4b4e20b8792025b7b95 From 24a89d2d75d3840a78b274f028a2ff85d05ca74b Mon Sep 17 00:00:00 2001 From: jrnold Date: Mon, 28 Dec 2015 22:32:53 -0800 Subject: [PATCH 28/68] add tm_grammar for Stan --- lib/linguist/languages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 319c445e..7d0b4ac3 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3361,7 +3361,7 @@ Stan: extensions: - .stan ace_mode: text - tm_scope: none + tm_scope: source.stan Standard ML: type: programming From 6f197bacc74337c696ece542f2f3c93b762d46ef Mon Sep 17 00:00:00 2001 From: Nicholas Wolverson Date: Tue, 29 Dec 2015 23:22:51 +0000 Subject: [PATCH 29/68] Update PureScript submodule (moved repo, grammar tweak) Updated grammar to support triple-quoted strings properly: https://github.com/purescript-contrib/atom-language-purescript/commit/0c53f8162e40f2942d3df53c928c8ce597be3e12 atom-language-purescript repo has been moved to purescript-contrib org, update the URL. --- .gitmodules | 2 +- vendor/grammars/atom-language-purescript | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 8234378a..53ff80c7 100644 --- a/.gitmodules +++ b/.gitmodules @@ -658,7 +658,7 @@ url = https://github.com/rpavlick/language-ncl.git [submodule "vendor/grammars/atom-language-purescript"] path = vendor/grammars/atom-language-purescript - url = https://github.com/freebroccolo/atom-language-purescript + url = https://github.com/purescript-contrib/atom-language-purescript [submodule "vendor/grammars/vue-syntax-highlight"] path = vendor/grammars/vue-syntax-highlight url = https://github.com/vuejs/vue-syntax-highlight diff --git a/vendor/grammars/atom-language-purescript b/vendor/grammars/atom-language-purescript index 21ed0c41..bd2b59f1 160000 --- a/vendor/grammars/atom-language-purescript +++ b/vendor/grammars/atom-language-purescript @@ -1 +1 @@ -Subproject commit 21ed0c417a143ca537afc5d65344acac3d88df94 +Subproject commit bd2b59f14eb4bd3b1f6d53eee959d7a6b523c073 From 70068f74f1f9c7dcd1133366749421a1acb28b41 Mon Sep 17 00:00:00 2001 From: AQNOUCH Mohammed Date: Fri, 1 Jan 2016 01:53:09 +0000 Subject: [PATCH 30/68] Updated copyright to 2016 --- LICENSE | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index c0a52444..ca0844d1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2011-2015 GitHub, Inc. +Copyright (c) 2011-2016 GitHub, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation From 048496723b1da98ab02b4b6d06174d107650c4c9 Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 16:15:03 -0500 Subject: [PATCH 31/68] Include lots more forms of Inform7 syntax. Including embedded Inform6 syntax. --- samples/Inform 7/Trivial Extension.i7x | 69 +++++++++++++++++++++++++- samples/Inform 7/story.ni | 52 ++++++++++++++++++- 2 files changed, 119 insertions(+), 2 deletions(-) diff --git a/samples/Inform 7/Trivial Extension.i7x b/samples/Inform 7/Trivial Extension.i7x index 1aae1b85..1ba8316d 100644 --- a/samples/Inform 7/Trivial Extension.i7x +++ b/samples/Inform 7/Trivial Extension.i7x @@ -1,6 +1,73 @@ -Version 1 of Trivial Extension by Andrew Plotkin begins here. +Version 2 of Trivial Extension by Andrew Plotkin begins here. + +"This is the rubric of the extension." + +"provided for the Linguist package by Andrew Plotkin" + +[Note the two special quoted lines above.] A cow is a kind of animal. A cow can be purple. +Understand "cow" as a cow. +Understand "purple" as a purple cow. + +Check pushing a cow: + instead say "Cow-tipping, at your age?[paragraph break]Inconceivable." + +[Here are the possible levels of heading:] + +Volume One + +Text-line is always "A line of text." + +Book 2 + + Part the third - indented headings still count + +Chapter IV - not for release + +[Heading labels are case-insensitive.] + +section foobar + +[A line beginning "Volume" that does not have blank lines before and after it is *not* a header line. So the following is part of section foobar:] + +Measure is a kind of value. +Volume is a measure. Length is a measure. +Area is a measure. 
+ +[And now some Inform 6 inclusions.] + +To say em -- running on: + (- style underline; -). +To say /em -- running on: + (- style roman; -). + +Include (- + +! Inform 6 comments start with ! marks and run to the end of the line. +Global cowcount; + +[ inform6func arg; + print "Here is some text; ", (address) 'dictword', ".^"; + cowcount++; ! increment this variable +]; + +Object i6cow + with name 'cow' 'animal', + with description "It looks like a cow."; + has animate scenery; + +-) after "Global Variables" in "Output.i6t". + Trivial Extension ends here. +---- DOCUMENTATION ---- + +Everything after the "---- DOCUMENTATION ----" line is documentation, so it should have the comment style. + +However, tab-indented lines are sample Inform code within the documentation: + + Horns are a kind of thing. Every cow has horns. + +So we need to allow for that. diff --git a/samples/Inform 7/story.ni b/samples/Inform 7/story.ni index f8873369..4fd774b5 100644 --- a/samples/Inform 7/story.ni +++ b/samples/Inform 7/story.ni @@ -2,11 +2,61 @@ Include Trivial Extension by Andrew Plotkin. +Volume 1 - overview + +Chapter - setting the scene + The Kitchen is a room. -[This kitchen is modelled after the one in Zork, although it lacks the detail to establish this to the player.] +[Comment: this kitchen is modelled after the one in Zork, although it lacks the detail to establish this to the player.] + +Section - the kitchen table + +The spicerack is a container in the Kitchen. + +Table of Spices +Name Flavor +"cinnamon" 5 +"nutmeg" 4 +"szechuan pepper" 8 + +The description of the spicerack is "It's mostly empty." + +Chapter - a character A purple cow called Gelett is in the Kitchen. +[This comment spans multiple lines.. + +...and this line contains [nested square[] brackets]... + +...which is legal in Inform 7.] + Instead of examining Gelett: say "You'd rather see than be one." + +Instead of examining Gelett: + say "You'd rather see than be one." 
+ +Check smelling Gelett: + say "This text contains several lines. + +A blank line is displayed as a paragraph break, +but a simple line break is not."; + stop the action. + +Section - cow catching + +Gelett has a number called the mooness. + +Instead of taking Gelett: + increment the mooness of Gelett; + if the mooness of Gelett is one: + say "Gelett moos once."; + else: + say "Gelett moos [mooness of Gelett in words] times."; + +Volume 2 - the turn cycle + +Every turn: + say "A turn passes[one of][or] placidly[or] idly[or] tediously[at random]." From 0f4cf11294b9ab029e6832c699419ccef3b4fcac Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 16:34:23 -0500 Subject: [PATCH 32/68] Wrong semicolon. --- samples/Inform 7/Trivial Extension.i7x | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/Inform 7/Trivial Extension.i7x b/samples/Inform 7/Trivial Extension.i7x index 1ba8316d..8b7e14cd 100644 --- a/samples/Inform 7/Trivial Extension.i7x +++ b/samples/Inform 7/Trivial Extension.i7x @@ -55,7 +55,7 @@ Global cowcount; Object i6cow with name 'cow' 'animal', - with description "It looks like a cow."; + with description "It looks like a cow.", has animate scenery; -) after "Global Variables" in "Output.i6t". From 7060b116f46c465f4879c3e644042846263173dd Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 16:54:55 -0500 Subject: [PATCH 33/68] Reword a little. --- samples/Inform 7/Trivial Extension.i7x | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/Inform 7/Trivial Extension.i7x b/samples/Inform 7/Trivial Extension.i7x index 8b7e14cd..df0412c3 100644 --- a/samples/Inform 7/Trivial Extension.i7x +++ b/samples/Inform 7/Trivial Extension.i7x @@ -45,7 +45,7 @@ To say /em -- running on: Include (- -! Inform 6 comments start with ! marks and run to the end of the line. +! Inform 6 comments start with a ! mark and run to the end of the line. 
Global cowcount; [ inform6func arg; From f488b9b9f793e0bddd45d29cdc770c4c5d9490ff Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 17:22:06 -0500 Subject: [PATCH 34/68] Test strings and substs in a doc comment. --- samples/Inform 7/Trivial Extension.i7x | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/Inform 7/Trivial Extension.i7x b/samples/Inform 7/Trivial Extension.i7x index df0412c3..e748c820 100644 --- a/samples/Inform 7/Trivial Extension.i7x +++ b/samples/Inform 7/Trivial Extension.i7x @@ -69,5 +69,6 @@ Everything after the "---- DOCUMENTATION ----" line is documentation, so it shou However, tab-indented lines are sample Inform code within the documentation: Horns are a kind of thing. Every cow has horns. + say "Moo[if the noun is purple] indigo[end if]." So we need to allow for that. From c0b9e2c3f416eb8605cac9a27c0ab4300fa7ec78 Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 21:52:51 -0500 Subject: [PATCH 35/68] Update comment. --- samples/Inform 7/Trivial Extension.i7x | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/Inform 7/Trivial Extension.i7x b/samples/Inform 7/Trivial Extension.i7x index e748c820..40f53dab 100644 --- a/samples/Inform 7/Trivial Extension.i7x +++ b/samples/Inform 7/Trivial Extension.i7x @@ -30,7 +30,7 @@ Chapter IV - not for release section foobar -[A line beginning "Volume" that does not have blank lines before and after it is *not* a header line. So the following is part of section foobar:] +[A line beginning "Volume" that does not have blank lines before and after it is *not* a header line. So the following should all be part of section foobar. Sadly, the "Volume is..." line gets colored as a header, because Atom's regexp model can't recognize "thing with blank lines before and after"!] Measure is a kind of value. Volume is a measure. Length is a measure. 
From 537b83c75935b8b8b2cc5aa48af87d9e13edc304 Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 22:23:01 -0500 Subject: [PATCH 36/68] Switch from Sublime-Inform grammar to language-inform7 grammar for Inform 7 source code. --- .gitmodules | 3 +++ grammars.yml | 4 ++-- lib/linguist/languages.yml | 2 +- vendor/grammars/language-inform7 | 1 + 4 files changed, 7 insertions(+), 3 deletions(-) create mode 160000 vendor/grammars/language-inform7 diff --git a/.gitmodules b/.gitmodules index 8234378a..726e5e12 100644 --- a/.gitmodules +++ b/.gitmodules @@ -701,3 +701,6 @@ [submodule "vendor/grammars/language-renpy"] path = vendor/grammars/language-renpy url = https://github.com/williamd1k0/language-renpy.git +[submodule "vendor/grammars/language-inform7"] + path = vendor/grammars/language-inform7 + url = https://github.com/erkyrath/language-inform7 diff --git a/grammars.yml b/grammars.yml index 8f7b1173..bca5a898 100644 --- a/grammars.yml +++ b/grammars.yml @@ -103,8 +103,6 @@ vendor/grammars/Sublime-Coq: - source.coq vendor/grammars/Sublime-HTTP: - source.httpspec -vendor/grammars/Sublime-Inform: -- source.Inform7 vendor/grammars/Sublime-Lasso: - file.lasso vendor/grammars/Sublime-Logos: @@ -342,6 +340,8 @@ vendor/grammars/language-gfm: - source.gfm vendor/grammars/language-hy: - source.hy +vendor/grammars/language-inform7: +- source.inform7 vendor/grammars/language-javascript: - source.js - source.js.regexp diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index c5eb9e6a..257cd7fc 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -1510,7 +1510,7 @@ Inform 7: extensions: - .ni - .i7x - tm_scope: source.Inform7 + tm_scope: source.inform7 aliases: - i7 - inform7 diff --git a/vendor/grammars/language-inform7 b/vendor/grammars/language-inform7 new file mode 160000 index 00000000..b953a1ef --- /dev/null +++ b/vendor/grammars/language-inform7 @@ -0,0 +1 @@ +Subproject commit b953a1efedcff21091ba3b7e7fbcd1040c1f02bb 
From b790a49282d970f0123965c31c484469eb6dc53a Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 22:25:11 -0500 Subject: [PATCH 37/68] Remove the old Sublime-Inform grammar from the submodules list. --- .gitmodules | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index 726e5e12..f52dc313 100644 --- a/.gitmodules +++ b/.gitmodules @@ -25,9 +25,6 @@ [submodule "vendor/grammars/Sublime-REBOL"] path = vendor/grammars/Sublime-REBOL url = https://github.com/Oldes/Sublime-REBOL -[submodule "vendor/grammars/Sublime-Inform"] - path = vendor/grammars/Sublime-Inform - url = https://github.com/PogiNate/Sublime-Inform [submodule "vendor/grammars/autoitv3-tmbundle"] path = vendor/grammars/autoitv3-tmbundle url = https://github.com/Red-Nova-Technologies/autoitv3-tmbundle From 5d4cad639430ae917653da29fcfca79566019fb9 Mon Sep 17 00:00:00 2001 From: Andrew Plotkin Date: Fri, 1 Jan 2016 22:40:23 -0500 Subject: [PATCH 38/68] Remove the old submodule entry. --- vendor/grammars/Sublime-Inform | 1 - 1 file changed, 1 deletion(-) delete mode 160000 vendor/grammars/Sublime-Inform diff --git a/vendor/grammars/Sublime-Inform b/vendor/grammars/Sublime-Inform deleted file mode 160000 index 8db129b8..00000000 --- a/vendor/grammars/Sublime-Inform +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 8db129b8389044a6660ca232566651c8fe3ab646 From 3e665099ac84e0b2307c034aa4d2da7a1726e60e Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Sat, 2 Jan 2016 22:04:16 -0500 Subject: [PATCH 39/68] Updating Python grammars --- grammars.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/grammars.yml b/grammars.yml index 004f9cb0..2b7b5c8d 100644 --- a/grammars.yml +++ b/grammars.yml @@ -69,6 +69,9 @@ vendor/grammars/Lean.tmbundle: - source.lean vendor/grammars/LiveScript.tmbundle: - source.livescript +vendor/grammars/MagicPython: +- source.python +- source.regexp.python vendor/grammars/Modelica/: - source.modelica vendor/grammars/NSIS: @@ -355,10 
+358,7 @@ vendor/grammars/language-maxscript: - source.maxscript vendor/grammars/language-ncl: - source.ncl -vendor/grammars/MagicPython: -- source.python vendor/grammars/language-python: -- source.regexp.python - text.python.console - text.python.traceback vendor/grammars/language-renpy: From a944769d61795e2d7aea17e502996dbf250f261c Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Sat, 2 Jan 2016 22:18:27 -0500 Subject: [PATCH 40/68] Grammars update --- vendor/grammars/Elm.tmLanguage | 2 +- vendor/grammars/Handlebars | 2 +- vendor/grammars/MagicPython | 2 +- vendor/grammars/abap.tmbundle | 2 +- vendor/grammars/atom-fsharp | 2 +- vendor/grammars/atom-language-stan | 2 +- vendor/grammars/ats.sublime | 2 +- vendor/grammars/c.tmbundle | 2 +- vendor/grammars/elixir-tmbundle | 2 +- vendor/grammars/factor | 2 +- vendor/grammars/html.tmbundle | 2 +- vendor/grammars/json.tmbundle | 2 +- vendor/grammars/language-babel | 2 +- vendor/grammars/language-clojure | 2 +- vendor/grammars/language-javascript | 2 +- vendor/grammars/language-maxscript | 2 +- vendor/grammars/latex.tmbundle | 2 +- vendor/grammars/make.tmbundle | 2 +- vendor/grammars/sas.tmbundle | 2 +- vendor/grammars/sublime-mask | 2 +- vendor/grammars/sublime-text-ox | 2 +- vendor/grammars/sublime-typescript | 2 +- vendor/grammars/sublime_cobol | 2 +- vendor/grammars/swift.tmbundle | 2 +- vendor/grammars/verilog.tmbundle | 2 +- 25 files changed, 25 insertions(+), 25 deletions(-) diff --git a/vendor/grammars/Elm.tmLanguage b/vendor/grammars/Elm.tmLanguage index ab79692f..494145bd 160000 --- a/vendor/grammars/Elm.tmLanguage +++ b/vendor/grammars/Elm.tmLanguage @@ -1 +1 @@ -Subproject commit ab79692fed628c9b08b7d11f21d9288cdf6b4e84 +Subproject commit 494145bded21fd6e18454d9d9d5aa8b67ced1b96 diff --git a/vendor/grammars/Handlebars b/vendor/grammars/Handlebars index 60309828..7e5396d9 160000 --- a/vendor/grammars/Handlebars +++ b/vendor/grammars/Handlebars @@ -1 +1 @@ -Subproject commit 
6030982880f95b887daebf1ccb30ad1c615e0fc2 +Subproject commit 7e5396d9be8b05c705be614afb022f755a7edc17 diff --git a/vendor/grammars/MagicPython b/vendor/grammars/MagicPython index 13bf5348..d7855f98 160000 --- a/vendor/grammars/MagicPython +++ b/vendor/grammars/MagicPython @@ -1 +1 @@ -Subproject commit 13bf534834036911c57d15c40242e45783aad1ba +Subproject commit d7855f98ce6fbe0703c2753b0deae263a27fd8f8 diff --git a/vendor/grammars/abap.tmbundle b/vendor/grammars/abap.tmbundle index 5b7e30fd..e4e71dee 160000 --- a/vendor/grammars/abap.tmbundle +++ b/vendor/grammars/abap.tmbundle @@ -1 +1 @@ -Subproject commit 5b7e30fd8fb4bcaa78c14aaabebe98dda8ab7bbf +Subproject commit e4e71dee51b1fe6973e11d6a617de72991be4ecb diff --git a/vendor/grammars/atom-fsharp b/vendor/grammars/atom-fsharp index 500cdb5f..4376748e 160000 --- a/vendor/grammars/atom-fsharp +++ b/vendor/grammars/atom-fsharp @@ -1 +1 @@ -Subproject commit 500cdb5f045ff6a121fa2732bbd56c4da18bdae1 +Subproject commit 4376748ed29b5e104cd0dd807c8c9f09a09ec361 diff --git a/vendor/grammars/atom-language-stan b/vendor/grammars/atom-language-stan index 72c626ae..f6911bef 160000 --- a/vendor/grammars/atom-language-stan +++ b/vendor/grammars/atom-language-stan @@ -1 +1 @@ -Subproject commit 72c626ae96792809447ed4b4e20b8792025b7b95 +Subproject commit f6911bef091e155098f113ec5aaa89bd55283705 diff --git a/vendor/grammars/ats.sublime b/vendor/grammars/ats.sublime index 1ecc2b01..a3f24abb 160000 --- a/vendor/grammars/ats.sublime +++ b/vendor/grammars/ats.sublime @@ -1 +1 @@ -Subproject commit 1ecc2b011ed9d58bd5435b7440697e300591a23d +Subproject commit a3f24abbe7043adc0ad798711467edae33cf89f0 diff --git a/vendor/grammars/c.tmbundle b/vendor/grammars/c.tmbundle index f6048afe..f1424d49 160000 --- a/vendor/grammars/c.tmbundle +++ b/vendor/grammars/c.tmbundle @@ -1 +1 @@ -Subproject commit f6048afe693e50adf47d46aba791f95c9138823e +Subproject commit f1424d49dd879f6c5a67d3afe6c48a17f538345a diff --git a/vendor/grammars/elixir-tmbundle 
b/vendor/grammars/elixir-tmbundle index 4b502e43..13d94e0e 160000 --- a/vendor/grammars/elixir-tmbundle +++ b/vendor/grammars/elixir-tmbundle @@ -1 +1 @@ -Subproject commit 4b502e436d150f6af97b085a763179941b3c3778 +Subproject commit 13d94e0ec98c2447adc80230708afa94012ec825 diff --git a/vendor/grammars/factor b/vendor/grammars/factor index 9d956025..7b289493 160000 --- a/vendor/grammars/factor +++ b/vendor/grammars/factor @@ -1 +1 @@ -Subproject commit 9d95602591cf231a3f2332269d81d60068d1aa76 +Subproject commit 7b289493316955db1649eb2745a2db7a78a4c9f9 diff --git a/vendor/grammars/html.tmbundle b/vendor/grammars/html.tmbundle index a0bc0c47..36b90bc1 160000 --- a/vendor/grammars/html.tmbundle +++ b/vendor/grammars/html.tmbundle @@ -1 +1 @@ -Subproject commit a0bc0c479bde0d46bfec863f3262c06e768eec2d +Subproject commit 36b90bc113cdd59dcaeb0885103f9b6a773939e1 diff --git a/vendor/grammars/json.tmbundle b/vendor/grammars/json.tmbundle index 06b38d55..0762cbdc 160000 --- a/vendor/grammars/json.tmbundle +++ b/vendor/grammars/json.tmbundle @@ -1 +1 @@ -Subproject commit 06b38d55326363b63c26943c6e0213988180dbca +Subproject commit 0762cbdcb34dd98801b6323e75332cd4c9dbc07e diff --git a/vendor/grammars/language-babel b/vendor/grammars/language-babel index 4b0e9658..20c649bc 160000 --- a/vendor/grammars/language-babel +++ b/vendor/grammars/language-babel @@ -1 +1 @@ -Subproject commit 4b0e9658e0a8bab71983ea5941f99b552cf00848 +Subproject commit 20c649bcc79b9fd9a72e4b6e1fa11e14d553a9d8 diff --git a/vendor/grammars/language-clojure b/vendor/grammars/language-clojure index 12b73d41..a0193ad2 160000 --- a/vendor/grammars/language-clojure +++ b/vendor/grammars/language-clojure @@ -1 +1 @@ -Subproject commit 12b73d41a0e0437b4899f73c5ac65caad74ac2c1 +Subproject commit a0193ad2a9797033649e665083f09249d2d098fc diff --git a/vendor/grammars/language-javascript b/vendor/grammars/language-javascript index b227486f..162309ab 160000 --- a/vendor/grammars/language-javascript +++ 
b/vendor/grammars/language-javascript @@ -1 +1 @@ -Subproject commit b227486fc84c8af8f7439e96424c2f3724c26e44 +Subproject commit 162309ab8525c0f1b8602514e6aa347c9889437e diff --git a/vendor/grammars/language-maxscript b/vendor/grammars/language-maxscript index a465c9ca..56f89d27 160000 --- a/vendor/grammars/language-maxscript +++ b/vendor/grammars/language-maxscript @@ -1 +1 @@ -Subproject commit a465c9ca4adf71b8524021acb3cbe447db19753b +Subproject commit 56f89d27153d27f3fed5f8b50fda8fa728fa2911 diff --git a/vendor/grammars/latex.tmbundle b/vendor/grammars/latex.tmbundle index bb4edc2b..3be8cd92 160000 --- a/vendor/grammars/latex.tmbundle +++ b/vendor/grammars/latex.tmbundle @@ -1 +1 @@ -Subproject commit bb4edc2b6af0d95c2084511ce3afc324a9c83da5 +Subproject commit 3be8cd9208fb2aa5e9fe4ebd0074f55433bca7e0 diff --git a/vendor/grammars/make.tmbundle b/vendor/grammars/make.tmbundle index e0d96dc1..1a1827da 160000 --- a/vendor/grammars/make.tmbundle +++ b/vendor/grammars/make.tmbundle @@ -1 +1 @@ -Subproject commit e0d96dc1d6ec8ef9ee421da9963d231256a2f22d +Subproject commit 1a1827da81e20fdce56e2658451340c070ca44b7 diff --git a/vendor/grammars/sas.tmbundle b/vendor/grammars/sas.tmbundle index 30fa23fc..3759a197 160000 --- a/vendor/grammars/sas.tmbundle +++ b/vendor/grammars/sas.tmbundle @@ -1 +1 @@ -Subproject commit 30fa23fc34cf5147bcfd0759a4cbf83cd987337d +Subproject commit 3759a19719d3c4c4979087be12adbcaa02a7bca3 diff --git a/vendor/grammars/sublime-mask b/vendor/grammars/sublime-mask index 819f743e..8b2a4b33 160000 --- a/vendor/grammars/sublime-mask +++ b/vendor/grammars/sublime-mask @@ -1 +1 @@ -Subproject commit 819f743efdcfa0d90a5b935cfaad799bd4ca7e6a +Subproject commit 8b2a4b3300ef30f293218521cd2aa94335cad114 diff --git a/vendor/grammars/sublime-text-ox b/vendor/grammars/sublime-text-ox index ed96fb6a..5cbc2c65 160000 --- a/vendor/grammars/sublime-text-ox +++ b/vendor/grammars/sublime-text-ox @@ -1 +1 @@ -Subproject commit ed96fb6afc0321c7d3ce219d69b56c591f0938a0 
+Subproject commit 5cbc2c655b4b771d34b75bbe962964455510644b diff --git a/vendor/grammars/sublime-typescript b/vendor/grammars/sublime-typescript index 6540de45..d2c76b8c 160000 --- a/vendor/grammars/sublime-typescript +++ b/vendor/grammars/sublime-typescript @@ -1 +1 @@ -Subproject commit 6540de452eb08766d379fe10aba4bce9eb645ec0 +Subproject commit d2c76b8c8262c9a848d4814662a0c8e13d1e2a6a diff --git a/vendor/grammars/sublime_cobol b/vendor/grammars/sublime_cobol index 19ae2ba3..ad15734a 160000 --- a/vendor/grammars/sublime_cobol +++ b/vendor/grammars/sublime_cobol @@ -1 +1 @@ -Subproject commit 19ae2ba33ebc0b439051a499888acd4f916f83b9 +Subproject commit ad15734a4a9798a006525f53a968565fee1411b1 diff --git a/vendor/grammars/swift.tmbundle b/vendor/grammars/swift.tmbundle index 75a3dea1..e6375f4d 160000 --- a/vendor/grammars/swift.tmbundle +++ b/vendor/grammars/swift.tmbundle @@ -1 +1 @@ -Subproject commit 75a3dea1841cd153c71101fa30841a269ab3c6a7 +Subproject commit e6375f4dbb9c5a715e1924b1e9fc1056f0b3dee7 diff --git a/vendor/grammars/verilog.tmbundle b/vendor/grammars/verilog.tmbundle index 7627ae50..f55a9859 160000 --- a/vendor/grammars/verilog.tmbundle +++ b/vendor/grammars/verilog.tmbundle @@ -1 +1 @@ -Subproject commit 7627ae507278edd9534c3f905d48736875658bf5 +Subproject commit f55a9859f48bb6189992b86748c127044131a785 From ca4bc6110f2a99bc54072d4924c28dec5e3b101a Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sun, 3 Jan 2016 11:47:09 +0100 Subject: [PATCH 41/68] Remove 'use strict' from Perl6 sample --- samples/Perl6/hash.t | 1 - 1 file changed, 1 deletion(-) diff --git a/samples/Perl6/hash.t b/samples/Perl6/hash.t index 7bff849e..24ba2b50 100644 --- a/samples/Perl6/hash.t +++ b/samples/Perl6/hash.t @@ -12,7 +12,6 @@ unless EVAL 'EVAL("1", :lang)' { die unless EVAL(q/ package My::Hash; -use strict; sub new { my ($class, $ref) = @_; From 5906fa81bb3782cd8d79390f2544da4b71fd8654 Mon Sep 17 00:00:00 2001 From: Louis Pilfold Date: Mon, 4 Jan 2016 11:12:39 +0000 
Subject: [PATCH 42/68] support for Erlang leex (.xrl) / yecc (.yrl) files --- lib/linguist/languages.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 7d0b4ac3..6a302889 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -939,6 +939,8 @@ Erlang: - .es - .escript - .hrl + - .xrl + - .yrl filenames: - rebar.config - rebar.config.lock From 0a7aab947cbfc8924a6721f81707b01f2717e048 Mon Sep 17 00:00:00 2001 From: Louis Pilfold Date: Mon, 4 Jan 2016 12:16:32 +0000 Subject: [PATCH 43/68] Add the Elixir parser as an example of erlang .yrl --- samples/Erlang/elixir_parser.yrl | 856 +++++++++++++++++++++++++++++++ 1 file changed, 856 insertions(+) create mode 100644 samples/Erlang/elixir_parser.yrl diff --git a/samples/Erlang/elixir_parser.yrl b/samples/Erlang/elixir_parser.yrl new file mode 100644 index 00000000..f49f8539 --- /dev/null +++ b/samples/Erlang/elixir_parser.yrl @@ -0,0 +1,856 @@ +Nonterminals + grammar expr_list + expr container_expr block_expr access_expr + no_parens_expr no_parens_zero_expr no_parens_one_expr no_parens_one_ambig_expr + bracket_expr bracket_at_expr bracket_arg matched_expr unmatched_expr max_expr + unmatched_op_expr matched_op_expr no_parens_op_expr no_parens_many_expr + comp_op_eol at_op_eol unary_op_eol and_op_eol or_op_eol capture_op_eol + add_op_eol mult_op_eol two_op_eol three_op_eol pipe_op_eol stab_op_eol + arrow_op_eol match_op_eol when_op_eol in_op_eol in_match_op_eol + type_op_eol rel_op_eol + open_paren close_paren empty_paren eoe + list list_args open_bracket close_bracket + tuple open_curly close_curly + bit_string open_bit close_bit + map map_op map_close map_args map_expr struct_op + assoc_op_eol assoc_expr assoc_base assoc_update assoc_update_kw assoc + container_args_base container_args + call_args_parens_expr call_args_parens_base call_args_parens parens_call + call_args_no_parens_one call_args_no_parens_ambig call_args_no_parens_expr 
+ call_args_no_parens_comma_expr call_args_no_parens_all call_args_no_parens_many + call_args_no_parens_many_strict + stab stab_eoe stab_expr stab_op_eol_and_expr stab_parens_many + kw_eol kw_base kw call_args_no_parens_kw_expr call_args_no_parens_kw + dot_op dot_alias dot_alias_container + dot_identifier dot_op_identifier dot_do_identifier + dot_paren_identifier dot_bracket_identifier + do_block fn_eoe do_eoe end_eoe block_eoe block_item block_list + . + +Terminals + identifier kw_identifier kw_identifier_safe kw_identifier_unsafe bracket_identifier + paren_identifier do_identifier block_identifier + fn 'end' aliases + number atom atom_safe atom_unsafe bin_string list_string sigil + dot_call_op op_identifier + comp_op at_op unary_op and_op or_op arrow_op match_op in_op in_match_op + type_op dual_op add_op mult_op two_op three_op pipe_op stab_op when_op assoc_op + capture_op rel_op + 'true' 'false' 'nil' 'do' eol ';' ',' '.' + '(' ')' '[' ']' '{' '}' '<<' '>>' '%{}' '%' + . + +Rootsymbol grammar. + +%% Two shift/reduce conflicts coming from call_args_parens. +Expect 2. + +%% Changes in ops and precedence should be reflected on lib/elixir/lib/macro.ex +%% Note though the operator => in practice has lower precedence than all others, +%% its entry in the table is only to support the %{user | foo => bar} syntax. +Left 5 do. +Right 10 stab_op_eol. %% -> +Left 20 ','. +Nonassoc 30 capture_op_eol. %% & +Left 40 in_match_op_eol. %% <-, \\ (allowed in matches along =) +Right 50 when_op_eol. %% when +Right 60 type_op_eol. %% :: +Right 70 pipe_op_eol. %% | +Right 80 assoc_op_eol. %% => +Right 90 match_op_eol. %% = +Left 130 or_op_eol. %% ||, |||, or +Left 140 and_op_eol. %% &&, &&&, and +Left 150 comp_op_eol. %% ==, !=, =~, ===, !== +Left 160 rel_op_eol. %% <, >, <=, >= +Left 170 arrow_op_eol. %% |>, <<<, >>>, ~>>, <<~, ~>, <~, <~>, <|> +Left 180 in_op_eol. %% in +Left 190 three_op_eol. %% ^^^ +Right 200 two_op_eol. %% ++, --, .., <> +Left 210 add_op_eol. 
%% +, - +Left 220 mult_op_eol. %% *, / +Nonassoc 300 unary_op_eol. %% +, -, !, ^, not, ~~~ +Left 310 dot_call_op. +Left 310 dot_op. %% . +Nonassoc 320 at_op_eol. %% @ +Nonassoc 330 dot_identifier. + +%%% MAIN FLOW OF EXPRESSIONS + +grammar -> eoe : nil. +grammar -> expr_list : to_block('$1'). +grammar -> eoe expr_list : to_block('$2'). +grammar -> expr_list eoe : to_block('$1'). +grammar -> eoe expr_list eoe : to_block('$2'). +grammar -> '$empty' : nil. + +% Note expressions are on reverse order +expr_list -> expr : ['$1']. +expr_list -> expr_list eoe expr : ['$3'|'$1']. + +expr -> matched_expr : '$1'. +expr -> no_parens_expr : '$1'. +expr -> unmatched_expr : '$1'. + +%% In Elixir we have three main call syntaxes: with parentheses, +%% without parentheses and with do blocks. They are represented +%% in the AST as matched, no_parens and unmatched. +%% +%% Calls without parentheses are further divided according to how +%% problematic they are: +%% +%% (a) no_parens_one: a call with one unproblematic argument +%% (e.g. `f a` or `f g a` and similar) (includes unary operators) +%% +%% (b) no_parens_many: a call with several arguments (e.g. `f a, b`) +%% +%% (c) no_parens_one_ambig: a call with one argument which is +%% itself a no_parens_many or no_parens_one_ambig (e.g. `f g a, b` +%% or `f g h a, b` and similar) +%% +%% Note, in particular, that no_parens_one_ambig expressions are +%% ambiguous and are interpreted such that the outer function has +%% arity 1 (e.g. `f g a, b` is interpreted as `f(g(a, b))` rather +%% than `f(g(a), b)`). Hence the name, no_parens_one_ambig. 
+%% +%% The distinction is required because we can't, for example, have +%% a function call with a do block as argument inside another do +%% block call, unless there are parentheses: +%% +%% if if true do true else false end do #=> invalid +%% if(if true do true else false end) do #=> valid +%% +%% Similarly, it is not possible to nest calls without parentheses +%% if their arity is more than 1: +%% +%% foo a, bar b, c #=> invalid +%% foo(a, bar b, c) #=> invalid +%% foo bar a, b #=> valid +%% foo a, bar(b, c) #=> valid +%% +%% So the different grammar rules need to take into account +%% if calls without parentheses are do blocks in particular +%% segments and act accordingly. +matched_expr -> matched_expr matched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +matched_expr -> unary_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> at_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> capture_op_eol matched_expr : build_unary_op('$1', '$2'). +matched_expr -> no_parens_one_expr : '$1'. +matched_expr -> no_parens_zero_expr : '$1'. +matched_expr -> access_expr : '$1'. +matched_expr -> access_expr kw_identifier : throw_invalid_kw_identifier('$2'). + +unmatched_expr -> matched_expr unmatched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr matched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr unmatched_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unmatched_expr no_parens_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). +unmatched_expr -> unary_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> at_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> capture_op_eol expr : build_unary_op('$1', '$2'). +unmatched_expr -> block_expr : '$1'. + +no_parens_expr -> matched_expr no_parens_op_expr : build_op(element(1, '$2'), '$1', element(2, '$2')). 
+no_parens_expr -> unary_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> at_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> capture_op_eol no_parens_expr : build_unary_op('$1', '$2'). +no_parens_expr -> no_parens_one_ambig_expr : '$1'. +no_parens_expr -> no_parens_many_expr : '$1'. + +block_expr -> parens_call call_args_parens do_block : build_identifier('$1', '$2' ++ '$3'). +block_expr -> parens_call call_args_parens call_args_parens do_block : build_nested_parens('$1', '$2', '$3' ++ '$4'). +block_expr -> dot_do_identifier do_block : build_identifier('$1', '$2'). +block_expr -> dot_identifier call_args_no_parens_all do_block : build_identifier('$1', '$2' ++ '$3'). + +matched_op_expr -> match_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> add_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> mult_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> two_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> three_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> and_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> or_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> in_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> in_match_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> type_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> when_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> pipe_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> comp_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> rel_op_eol matched_expr : {'$1', '$2'}. +matched_op_expr -> arrow_op_eol matched_expr : {'$1', '$2'}. +%% Warn for no parens subset +matched_op_expr -> arrow_op_eol no_parens_one_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. + +unmatched_op_expr -> match_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> add_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> mult_op_eol unmatched_expr : {'$1', '$2'}. 
+unmatched_op_expr -> two_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> three_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> and_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> or_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> in_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> in_match_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> type_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> when_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> pipe_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> comp_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> rel_op_eol unmatched_expr : {'$1', '$2'}. +unmatched_op_expr -> arrow_op_eol unmatched_expr : {'$1', '$2'}. + +no_parens_op_expr -> match_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> add_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> mult_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> two_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> three_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> and_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> or_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> in_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> in_match_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> type_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> when_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> pipe_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> comp_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> rel_op_eol no_parens_expr : {'$1', '$2'}. +no_parens_op_expr -> arrow_op_eol no_parens_expr : {'$1', '$2'}. +%% Warn for no parens subset +no_parens_op_expr -> arrow_op_eol no_parens_one_ambig_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. +no_parens_op_expr -> arrow_op_eol no_parens_many_expr : warn_pipe('$1', '$2'), {'$1', '$2'}. 
+ +%% Allow when (and only when) with keywords +no_parens_op_expr -> when_op_eol call_args_no_parens_kw : {'$1', '$2'}. + +no_parens_one_ambig_expr -> dot_op_identifier call_args_no_parens_ambig : build_identifier('$1', '$2'). +no_parens_one_ambig_expr -> dot_identifier call_args_no_parens_ambig : build_identifier('$1', '$2'). + +no_parens_many_expr -> dot_op_identifier call_args_no_parens_many_strict : build_identifier('$1', '$2'). +no_parens_many_expr -> dot_identifier call_args_no_parens_many_strict : build_identifier('$1', '$2'). + +no_parens_one_expr -> dot_op_identifier call_args_no_parens_one : build_identifier('$1', '$2'). +no_parens_one_expr -> dot_identifier call_args_no_parens_one : build_identifier('$1', '$2'). +no_parens_zero_expr -> dot_do_identifier : build_identifier('$1', nil). +no_parens_zero_expr -> dot_identifier : build_identifier('$1', nil). + +%% From this point on, we just have constructs that can be +%% used with the access syntax. Notice that (dot_)identifier +%% is not included in this list simply because the tokenizer +%% marks identifiers followed by brackets as bracket_identifier. +access_expr -> bracket_at_expr : '$1'. +access_expr -> bracket_expr : '$1'. +access_expr -> at_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> unary_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> capture_op_eol number : build_unary_op('$1', ?exprs('$2')). +access_expr -> fn_eoe stab end_eoe : build_fn('$1', reverse('$2')). +access_expr -> open_paren stab close_paren : build_stab(reverse('$2')). +access_expr -> open_paren stab ';' close_paren : build_stab(reverse('$2')). +access_expr -> open_paren ';' stab ';' close_paren : build_stab(reverse('$3')). +access_expr -> open_paren ';' stab close_paren : build_stab(reverse('$3')). +access_expr -> open_paren ';' close_paren : build_stab([]). +access_expr -> empty_paren : nil. +access_expr -> number : ?exprs('$1'). +access_expr -> list : element(1, '$1'). 
+access_expr -> map : '$1'. +access_expr -> tuple : '$1'. +access_expr -> 'true' : ?id('$1'). +access_expr -> 'false' : ?id('$1'). +access_expr -> 'nil' : ?id('$1'). +access_expr -> bin_string : build_bin_string('$1'). +access_expr -> list_string : build_list_string('$1'). +access_expr -> bit_string : '$1'. +access_expr -> sigil : build_sigil('$1'). +access_expr -> max_expr : '$1'. + +%% Aliases and properly formed calls. Used by map_expr. +max_expr -> atom : ?exprs('$1'). +max_expr -> atom_safe : build_quoted_atom('$1', true). +max_expr -> atom_unsafe : build_quoted_atom('$1', false). +max_expr -> parens_call call_args_parens : build_identifier('$1', '$2'). +max_expr -> parens_call call_args_parens call_args_parens : build_nested_parens('$1', '$2', '$3'). +max_expr -> dot_alias : '$1'. + +bracket_arg -> open_bracket kw close_bracket : build_list('$1', '$2'). +bracket_arg -> open_bracket container_expr close_bracket : build_list('$1', '$2'). +bracket_arg -> open_bracket container_expr ',' close_bracket : build_list('$1', '$2'). + +bracket_expr -> dot_bracket_identifier bracket_arg : build_access(build_identifier('$1', nil), '$2'). +bracket_expr -> access_expr bracket_arg : build_access('$1', '$2'). + +bracket_at_expr -> at_op_eol dot_bracket_identifier bracket_arg : + build_access(build_unary_op('$1', build_identifier('$2', nil)), '$3'). +bracket_at_expr -> at_op_eol access_expr bracket_arg : + build_access(build_unary_op('$1', '$2'), '$3'). + +%% Blocks + +do_block -> do_eoe 'end' : [[{do, nil}]]. +do_block -> do_eoe stab end_eoe : [[{do, build_stab(reverse('$2'))}]]. +do_block -> do_eoe block_list 'end' : [[{do, nil}|'$2']]. +do_block -> do_eoe stab_eoe block_list 'end' : [[{do, build_stab(reverse('$2'))}|'$3']]. + +eoe -> eol : '$1'. +eoe -> ';' : '$1'. +eoe -> eol ';' : '$1'. + +fn_eoe -> 'fn' : '$1'. +fn_eoe -> 'fn' eoe : '$1'. + +do_eoe -> 'do' : '$1'. +do_eoe -> 'do' eoe : '$1'. + +end_eoe -> 'end' : '$1'. +end_eoe -> eoe 'end' : '$2'. 
+ +block_eoe -> block_identifier : '$1'. +block_eoe -> block_identifier eoe : '$1'. + +stab -> stab_expr : ['$1']. +stab -> stab eoe stab_expr : ['$3'|'$1']. + +stab_eoe -> stab : '$1'. +stab_eoe -> stab eoe : '$1'. + +%% Here, `element(1, Token)` is the stab operator, +%% while `element(2, Token)` is the expression. +stab_expr -> expr : + '$1'. +stab_expr -> stab_op_eol_and_expr : + build_op(element(1, '$1'), [], element(2, '$1')). +stab_expr -> empty_paren stab_op_eol_and_expr : + build_op(element(1, '$2'), [], element(2, '$2')). +stab_expr -> call_args_no_parens_all stab_op_eol_and_expr : + build_op(element(1, '$2'), unwrap_when(unwrap_splice('$1')), element(2, '$2')). +stab_expr -> stab_parens_many stab_op_eol_and_expr : + build_op(element(1, '$2'), unwrap_splice('$1'), element(2, '$2')). +stab_expr -> stab_parens_many when_op expr stab_op_eol_and_expr : + build_op(element(1, '$4'), [{'when', meta_from_token('$2'), unwrap_splice('$1') ++ ['$3']}], element(2, '$4')). + +stab_op_eol_and_expr -> stab_op_eol expr : {'$1', '$2'}. +stab_op_eol_and_expr -> stab_op_eol : warn_empty_stab_clause('$1'), {'$1', nil}. + +block_item -> block_eoe stab_eoe : {?exprs('$1'), build_stab(reverse('$2'))}. +block_item -> block_eoe : {?exprs('$1'), nil}. + +block_list -> block_item : ['$1']. +block_list -> block_item block_list : ['$1'|'$2']. + +%% Helpers + +open_paren -> '(' : '$1'. +open_paren -> '(' eol : '$1'. +close_paren -> ')' : '$1'. +close_paren -> eol ')' : '$2'. + +empty_paren -> open_paren ')' : '$1'. + +open_bracket -> '[' : '$1'. +open_bracket -> '[' eol : '$1'. +close_bracket -> ']' : '$1'. +close_bracket -> eol ']' : '$2'. + +open_bit -> '<<' : '$1'. +open_bit -> '<<' eol : '$1'. +close_bit -> '>>' : '$1'. +close_bit -> eol '>>' : '$2'. + +open_curly -> '{' : '$1'. +open_curly -> '{' eol : '$1'. +close_curly -> '}' : '$1'. +close_curly -> eol '}' : '$2'. + +% Operators + +add_op_eol -> add_op : '$1'. +add_op_eol -> add_op eol : '$1'. +add_op_eol -> dual_op : '$1'. 
+add_op_eol -> dual_op eol : '$1'. + +mult_op_eol -> mult_op : '$1'. +mult_op_eol -> mult_op eol : '$1'. + +two_op_eol -> two_op : '$1'. +two_op_eol -> two_op eol : '$1'. + +three_op_eol -> three_op : '$1'. +three_op_eol -> three_op eol : '$1'. + +pipe_op_eol -> pipe_op : '$1'. +pipe_op_eol -> pipe_op eol : '$1'. + +capture_op_eol -> capture_op : '$1'. +capture_op_eol -> capture_op eol : '$1'. + +unary_op_eol -> unary_op : '$1'. +unary_op_eol -> unary_op eol : '$1'. +unary_op_eol -> dual_op : '$1'. +unary_op_eol -> dual_op eol : '$1'. + +match_op_eol -> match_op : '$1'. +match_op_eol -> match_op eol : '$1'. + +and_op_eol -> and_op : '$1'. +and_op_eol -> and_op eol : '$1'. + +or_op_eol -> or_op : '$1'. +or_op_eol -> or_op eol : '$1'. + +in_op_eol -> in_op : '$1'. +in_op_eol -> in_op eol : '$1'. + +in_match_op_eol -> in_match_op : '$1'. +in_match_op_eol -> in_match_op eol : '$1'. + +type_op_eol -> type_op : '$1'. +type_op_eol -> type_op eol : '$1'. + +when_op_eol -> when_op : '$1'. +when_op_eol -> when_op eol : '$1'. + +stab_op_eol -> stab_op : '$1'. +stab_op_eol -> stab_op eol : '$1'. + +at_op_eol -> at_op : '$1'. +at_op_eol -> at_op eol : '$1'. + +comp_op_eol -> comp_op : '$1'. +comp_op_eol -> comp_op eol : '$1'. + +rel_op_eol -> rel_op : '$1'. +rel_op_eol -> rel_op eol : '$1'. + +arrow_op_eol -> arrow_op : '$1'. +arrow_op_eol -> arrow_op eol : '$1'. + +% Dot operator + +dot_op -> '.' : '$1'. +dot_op -> '.' eol : '$1'. + +dot_identifier -> identifier : '$1'. +dot_identifier -> matched_expr dot_op identifier : build_dot('$2', '$1', '$3'). + +dot_alias -> aliases : {'__aliases__', meta_from_token('$1', 0), ?exprs('$1')}. +dot_alias -> matched_expr dot_op aliases : build_dot_alias('$2', '$1', '$3'). +dot_alias -> matched_expr dot_op dot_alias_container : build_dot_container('$2', '$1', '$3'). + +dot_alias_container -> open_curly '}' : []. +dot_alias_container -> open_curly container_args close_curly : '$2'. + +dot_op_identifier -> op_identifier : '$1'. 
+dot_op_identifier -> matched_expr dot_op op_identifier : build_dot('$2', '$1', '$3'). + +dot_do_identifier -> do_identifier : '$1'. +dot_do_identifier -> matched_expr dot_op do_identifier : build_dot('$2', '$1', '$3'). + +dot_bracket_identifier -> bracket_identifier : '$1'. +dot_bracket_identifier -> matched_expr dot_op bracket_identifier : build_dot('$2', '$1', '$3'). + +dot_paren_identifier -> paren_identifier : '$1'. +dot_paren_identifier -> matched_expr dot_op paren_identifier : build_dot('$2', '$1', '$3'). + +parens_call -> dot_paren_identifier : '$1'. +parens_call -> matched_expr dot_call_op : {'.', meta_from_token('$2'), ['$1']}. % Fun/local calls + +% Function calls with no parentheses + +call_args_no_parens_expr -> matched_expr : '$1'. +call_args_no_parens_expr -> no_parens_expr : throw_no_parens_many_strict('$1'). + +call_args_no_parens_comma_expr -> matched_expr ',' call_args_no_parens_expr : ['$3', '$1']. +call_args_no_parens_comma_expr -> call_args_no_parens_comma_expr ',' call_args_no_parens_expr : ['$3'|'$1']. + +call_args_no_parens_all -> call_args_no_parens_one : '$1'. +call_args_no_parens_all -> call_args_no_parens_ambig : '$1'. +call_args_no_parens_all -> call_args_no_parens_many : '$1'. + +call_args_no_parens_one -> call_args_no_parens_kw : ['$1']. +call_args_no_parens_one -> matched_expr : ['$1']. + +call_args_no_parens_ambig -> no_parens_expr : ['$1']. + +call_args_no_parens_many -> matched_expr ',' call_args_no_parens_kw : ['$1', '$3']. +call_args_no_parens_many -> call_args_no_parens_comma_expr : reverse('$1'). +call_args_no_parens_many -> call_args_no_parens_comma_expr ',' call_args_no_parens_kw : reverse(['$3'|'$1']). + +call_args_no_parens_many_strict -> call_args_no_parens_many : '$1'. +call_args_no_parens_many_strict -> open_paren call_args_no_parens_kw close_paren : throw_no_parens_strict('$1'). +call_args_no_parens_many_strict -> open_paren call_args_no_parens_many close_paren : throw_no_parens_strict('$1'). 
+ +stab_parens_many -> open_paren call_args_no_parens_kw close_paren : ['$2']. +stab_parens_many -> open_paren call_args_no_parens_many close_paren : '$2'. + +% Containers + +container_expr -> matched_expr : '$1'. +container_expr -> unmatched_expr : '$1'. +container_expr -> no_parens_expr : throw_no_parens_container_strict('$1'). + +container_args_base -> container_expr : ['$1']. +container_args_base -> container_args_base ',' container_expr : ['$3'|'$1']. + +container_args -> container_args_base : lists:reverse('$1'). +container_args -> container_args_base ',' : lists:reverse('$1'). +container_args -> container_args_base ',' kw : lists:reverse(['$3'|'$1']). + +% Function calls with parentheses + +call_args_parens_expr -> matched_expr : '$1'. +call_args_parens_expr -> unmatched_expr : '$1'. +call_args_parens_expr -> no_parens_expr : throw_no_parens_many_strict('$1'). + +call_args_parens_base -> call_args_parens_expr : ['$1']. +call_args_parens_base -> call_args_parens_base ',' call_args_parens_expr : ['$3'|'$1']. + +call_args_parens -> empty_paren : []. +call_args_parens -> open_paren no_parens_expr close_paren : ['$2']. +call_args_parens -> open_paren kw close_paren : ['$2']. +call_args_parens -> open_paren call_args_parens_base close_paren : reverse('$2'). +call_args_parens -> open_paren call_args_parens_base ',' kw close_paren : reverse(['$4'|'$2']). + +% KV + +kw_eol -> kw_identifier : ?exprs('$1'). +kw_eol -> kw_identifier eol : ?exprs('$1'). +kw_eol -> kw_identifier_safe : build_quoted_atom('$1', true). +kw_eol -> kw_identifier_safe eol : build_quoted_atom('$1', true). +kw_eol -> kw_identifier_unsafe : build_quoted_atom('$1', false). +kw_eol -> kw_identifier_unsafe eol : build_quoted_atom('$1', false). + +kw_base -> kw_eol container_expr : [{'$1', '$2'}]. +kw_base -> kw_base ',' kw_eol container_expr : [{'$3', '$4'}|'$1']. + +kw -> kw_base : reverse('$1'). +kw -> kw_base ',' : reverse('$1'). 
+ +call_args_no_parens_kw_expr -> kw_eol matched_expr : {'$1', '$2'}. +call_args_no_parens_kw_expr -> kw_eol no_parens_expr : {'$1', '$2'}. + +call_args_no_parens_kw -> call_args_no_parens_kw_expr : ['$1']. +call_args_no_parens_kw -> call_args_no_parens_kw_expr ',' call_args_no_parens_kw : ['$1'|'$3']. + +% Lists + +list_args -> kw : '$1'. +list_args -> container_args_base : reverse('$1'). +list_args -> container_args_base ',' : reverse('$1'). +list_args -> container_args_base ',' kw : reverse('$1', '$3'). + +list -> open_bracket ']' : build_list('$1', []). +list -> open_bracket list_args close_bracket : build_list('$1', '$2'). + +% Tuple + +tuple -> open_curly '}' : build_tuple('$1', []). +tuple -> open_curly container_args close_curly : build_tuple('$1', '$2'). + +% Bitstrings + +bit_string -> open_bit '>>' : build_bit('$1', []). +bit_string -> open_bit container_args close_bit : build_bit('$1', '$2'). + +% Map and structs + +%% Allow unquote/@something/aliases inside maps and structs. +map_expr -> max_expr : '$1'. +map_expr -> dot_identifier : build_identifier('$1', nil). +map_expr -> at_op_eol map_expr : build_unary_op('$1', '$2'). + +assoc_op_eol -> assoc_op : '$1'. +assoc_op_eol -> assoc_op eol : '$1'. + +assoc_expr -> matched_expr assoc_op_eol matched_expr : {'$1', '$3'}. +assoc_expr -> unmatched_expr assoc_op_eol unmatched_expr : {'$1', '$3'}. +assoc_expr -> matched_expr assoc_op_eol unmatched_expr : {'$1', '$3'}. +assoc_expr -> unmatched_expr assoc_op_eol matched_expr : {'$1', '$3'}. +assoc_expr -> map_expr : '$1'. + +assoc_update -> matched_expr pipe_op_eol assoc_expr : {'$2', '$1', ['$3']}. +assoc_update -> unmatched_expr pipe_op_eol assoc_expr : {'$2', '$1', ['$3']}. + +assoc_update_kw -> matched_expr pipe_op_eol kw : {'$2', '$1', '$3'}. +assoc_update_kw -> unmatched_expr pipe_op_eol kw : {'$2', '$1', '$3'}. + +assoc_base -> assoc_expr : ['$1']. +assoc_base -> assoc_base ',' assoc_expr : ['$3'|'$1']. + +assoc -> assoc_base : reverse('$1'). 
+assoc -> assoc_base ',' : reverse('$1'). + +map_op -> '%{}' : '$1'. +map_op -> '%{}' eol : '$1'. + +map_close -> kw close_curly : '$1'. +map_close -> assoc close_curly : '$1'. +map_close -> assoc_base ',' kw close_curly : reverse('$1', '$3'). + +map_args -> open_curly '}' : build_map('$1', []). +map_args -> open_curly map_close : build_map('$1', '$2'). +map_args -> open_curly assoc_update close_curly : build_map_update('$1', '$2', []). +map_args -> open_curly assoc_update ',' close_curly : build_map_update('$1', '$2', []). +map_args -> open_curly assoc_update ',' map_close : build_map_update('$1', '$2', '$4'). +map_args -> open_curly assoc_update_kw close_curly : build_map_update('$1', '$2', []). + +struct_op -> '%' : '$1'. + +map -> map_op map_args : '$2'. +map -> struct_op map_expr map_args : {'%', meta_from_token('$1'), ['$2', '$3']}. +map -> struct_op map_expr eol map_args : {'%', meta_from_token('$1'), ['$2', '$4']}. + +Erlang code. + +-define(file(), get(elixir_parser_file)). +-define(id(Token), element(1, Token)). +-define(location(Token), element(2, Token)). +-define(exprs(Token), element(3, Token)). +-define(meta(Node), element(2, Node)). +-define(rearrange_uop(Op), (Op == 'not' orelse Op == '!')). + +%% The following directive is needed for (significantly) faster +%% compilation of the generated .erl file by the HiPE compiler +-compile([{hipe, [{regalloc, linear_scan}]}]). +-import(lists, [reverse/1, reverse/2]). + +meta_from_token(Token, Counter) -> [{counter, Counter}|meta_from_token(Token)]. +meta_from_token(Token) -> meta_from_location(?location(Token)). + +meta_from_location({Line, Column, EndColumn}) + when is_integer(Line), is_integer(Column), is_integer(EndColumn) -> [{line, Line}]. 
+ +%% Operators + +build_op({_Kind, Location, 'in'}, {UOp, _, [Left]}, Right) when ?rearrange_uop(UOp) -> + {UOp, meta_from_location(Location), [{'in', meta_from_location(Location), [Left, Right]}]}; + +build_op({_Kind, Location, Op}, Left, Right) -> + {Op, meta_from_location(Location), [Left, Right]}. + +build_unary_op({_Kind, Location, Op}, Expr) -> + {Op, meta_from_location(Location), [Expr]}. + +build_list(Marker, Args) -> + {Args, ?location(Marker)}. + +build_tuple(_Marker, [Left, Right]) -> + {Left, Right}; +build_tuple(Marker, Args) -> + {'{}', meta_from_token(Marker), Args}. + +build_bit(Marker, Args) -> + {'<<>>', meta_from_token(Marker), Args}. + +build_map(Marker, Args) -> + {'%{}', meta_from_token(Marker), Args}. + +build_map_update(Marker, {Pipe, Left, Right}, Extra) -> + {'%{}', meta_from_token(Marker), [build_op(Pipe, Left, Right ++ Extra)]}. + +%% Blocks + +build_block([{Op, _, [_]}]=Exprs) when ?rearrange_uop(Op) -> {'__block__', [], Exprs}; +build_block([{unquote_splicing, _, Args}]=Exprs) when + length(Args) =< 2 -> {'__block__', [], Exprs}; +build_block([Expr]) -> Expr; +build_block(Exprs) -> {'__block__', [], Exprs}. + +%% Dots + +build_dot_alias(Dot, {'__aliases__', _, Left}, {'aliases', _, Right}) -> + {'__aliases__', meta_from_token(Dot), Left ++ Right}; + +build_dot_alias(_Dot, Atom, {'aliases', _, _} = Token) when is_atom(Atom) -> + throw_bad_atom(Token); + +build_dot_alias(Dot, Other, {'aliases', _, Right}) -> + {'__aliases__', meta_from_token(Dot), [Other|Right]}. + +build_dot_container(Dot, Left, Right) -> + Meta = meta_from_token(Dot), + {{'.', Meta, [Left, '{}']}, Meta, Right}. + +build_dot(Dot, Left, Right) -> + {'.', meta_from_token(Dot), [Left, extract_identifier(Right)]}. + +extract_identifier({Kind, _, Identifier}) when + Kind == identifier; Kind == bracket_identifier; Kind == paren_identifier; + Kind == do_identifier; Kind == op_identifier -> + Identifier. 
+ +%% Identifiers + +build_nested_parens(Dot, Args1, Args2) -> + Identifier = build_identifier(Dot, Args1), + Meta = ?meta(Identifier), + {Identifier, Meta, Args2}. + +build_identifier({'.', Meta, _} = Dot, Args) -> + FArgs = case Args of + nil -> []; + _ -> Args + end, + {Dot, Meta, FArgs}; + +build_identifier({op_identifier, Location, Identifier}, [Arg]) -> + {Identifier, [{ambiguous_op, nil}|meta_from_location(Location)], [Arg]}; + +build_identifier({_, Location, Identifier}, Args) -> + {Identifier, meta_from_location(Location), Args}. + +%% Fn + +build_fn(Op, [{'->', _, [_, _]}|_] = Stab) -> + {fn, meta_from_token(Op), build_stab(Stab)}; +build_fn(Op, _Stab) -> + throw(meta_from_token(Op), "expected clauses to be defined with -> inside: ", "'fn'"). + +%% Access + +build_access(Expr, {List, Location}) -> + Meta = meta_from_location(Location), + {{'.', Meta, ['Elixir.Access', get]}, Meta, [Expr, List]}. + +%% Interpolation aware + +build_sigil({sigil, Location, Sigil, Parts, Modifiers}) -> + Meta = meta_from_location(Location), + {list_to_atom("sigil_" ++ [Sigil]), Meta, [{'<<>>', Meta, string_parts(Parts)}, Modifiers]}. + +build_bin_string({bin_string, _Location, [H]}) when is_binary(H) -> + H; +build_bin_string({bin_string, Location, Args}) -> + {'<<>>', meta_from_location(Location), string_parts(Args)}. + +build_list_string({list_string, _Location, [H]}) when is_binary(H) -> + elixir_utils:characters_to_list(H); +build_list_string({list_string, Location, Args}) -> + Meta = meta_from_location(Location), + {{'.', Meta, ['Elixir.String', to_char_list]}, Meta, [{'<<>>', Meta, string_parts(Args)}]}. + +build_quoted_atom({_, _Location, [H]}, Safe) when is_binary(H) -> + Op = binary_to_atom_op(Safe), erlang:Op(H, utf8); +build_quoted_atom({_, Location, Args}, Safe) -> + Meta = meta_from_location(Location), + {{'.', Meta, [erlang, binary_to_atom_op(Safe)]}, Meta, [{'<<>>', Meta, string_parts(Args)}, utf8]}. 
+ +binary_to_atom_op(true) -> binary_to_existing_atom; +binary_to_atom_op(false) -> binary_to_atom. + +string_parts(Parts) -> + [string_part(Part) || Part <- Parts]. +string_part(Binary) when is_binary(Binary) -> + Binary; +string_part({Location, Tokens}) -> + Form = string_tokens_parse(Tokens), + Meta = meta_from_location(Location), + {'::', Meta, [{{'.', Meta, ['Elixir.Kernel', to_string]}, Meta, [Form]}, {binary, Meta, nil}]}. + +string_tokens_parse(Tokens) -> + case parse(Tokens) of + {ok, Forms} -> Forms; + {error, _} = Error -> throw(Error) + end. + +%% Keywords + +build_stab([{'->', Meta, [Left, Right]}|T]) -> + build_stab(Meta, T, Left, [Right], []); + +build_stab(Else) -> + build_block(Else). + +build_stab(Old, [{'->', New, [Left, Right]}|T], Marker, Temp, Acc) -> + H = {'->', Old, [Marker, build_block(reverse(Temp))]}, + build_stab(New, T, Left, [Right], [H|Acc]); + +build_stab(Meta, [H|T], Marker, Temp, Acc) -> + build_stab(Meta, T, Marker, [H|Temp], Acc); + +build_stab(Meta, [], Marker, Temp, Acc) -> + H = {'->', Meta, [Marker, build_block(reverse(Temp))]}, + reverse([H|Acc]). + +%% Every time the parser sees a (unquote_splicing()) +%% it assumes that a block is being spliced, wrapping +%% the splicing in a __block__. But in the stab clause, +%% we can have (unquote_splicing(1, 2, 3)) -> :ok, in such +%% case, we don't actually want the block, since it is +%% an arg style call. unwrap_splice unwraps the splice +%% from such blocks. +unwrap_splice([{'__block__', [], [{unquote_splicing, _, _}] = Splice}]) -> + Splice; + +unwrap_splice(Other) -> Other. + +unwrap_when(Args) -> + case elixir_utils:split_last(Args) of + {Start, {'when', Meta, [_, _] = End}} -> + [{'when', Meta, Start ++ End}]; + {_, _} -> + Args + end. + +to_block([One]) -> One; +to_block(Other) -> {'__block__', [], reverse(Other)}. 
+ +%% Warnings and errors + +throw(Meta, Error, Token) -> + Line = + case lists:keyfind(line, 1, Meta) of + {line, L} -> L; + false -> 0 + end, + throw({error, {Line, ?MODULE, [Error, Token]}}). + +throw_bad_atom(Token) -> + throw(meta_from_token(Token), "atom cannot be followed by an alias. If the '.' was meant to be " + "part of the atom's name, the atom name must be quoted. Syntax error before: ", "'.'"). + +throw_no_parens_strict(Token) -> + throw(meta_from_token(Token), "unexpected parentheses. If you are making a " + "function call, do not insert spaces between the function name and the " + "opening parentheses. Syntax error before: ", "'('"). + +throw_no_parens_many_strict(Node) -> + throw(?meta(Node), + "unexpected comma. Parentheses are required to solve ambiguity in nested calls.\n\n" + "This error happens when you have nested function calls without parentheses. " + "For example:\n\n" + " one a, two b, c, d\n\n" + "In the example above, we don't know if the parameters \"c\" and \"d\" apply " + "to the function \"one\" or \"two\". You can solve this by explicitly adding " + "parentheses:\n\n" + " one a, two(b, c, d)\n\n" + "Elixir cannot compile otherwise. Syntax error before: ", "','"). + +throw_no_parens_container_strict(Node) -> + throw(?meta(Node), + "unexpected comma. Parentheses are required to solve ambiguity inside containers.\n\n" + "This error may happen when you forget a comma in a list or other container:\n\n" + " [a, b c, d]\n\n" + "Or when you have ambiguous calls:\n\n" + " [one, two three, four, five]\n\n" + "In the example above, we don't know if the parameters \"four\" and \"five\" " + "belongs to the list or the function \"two\". You can solve this by explicitly " + "adding parentheses:\n\n" + " [one, two(three, four), five]\n\n" + "Elixir cannot compile otherwise. Syntax error before: ", "','"). 
+ +throw_invalid_kw_identifier({_, _, do} = Token) -> + throw(meta_from_token(Token), elixir_tokenizer:invalid_do_error("unexpected keyword \"do:\""), "'do:'"); +throw_invalid_kw_identifier({_, _, KW} = Token) -> + throw(meta_from_token(Token), "syntax error before: ", "'" ++ atom_to_list(KW) ++ "':"). + +%% TODO: Make those warnings errors. +warn_empty_stab_clause({stab_op, {Line, _Begin, _End}, '->'}) -> + elixir_errors:warn(Line, ?file(), + "an expression is always required on the right side of ->. " + "Please provide a value after ->"). + +warn_pipe({arrow_op, {Line, _Begin, _End}, Op}, {_, [_|_], [_|_]}) -> + elixir_errors:warn(Line, ?file(), + io_lib:format( + "you are piping into a function call without parentheses, which may be ambiguous. " + "Please wrap the function you are piping into in parentheses. For example:\n\n" + " foo 1 ~ts bar 2 ~ts baz 3\n\n" + "Should be written as:\n\n" + " foo(1) ~ts bar(2) ~ts baz(3)\n", + [Op, Op, Op, Op] + ) + ); +warn_pipe(_Token, _) -> + ok. From 15232fc072df1f97b40c098551a9426c82527641 Mon Sep 17 00:00:00 2001 From: Louis Pilfold Date: Mon, 4 Jan 2016 12:17:31 +0000 Subject: [PATCH 44/68] Add the LFE lexer as an example of erlang .xrl --- samples/Erlang/lfe_scan.xrl | 256 ++++++++++++++++++++++++++++++++++++ 1 file changed, 256 insertions(+) create mode 100644 samples/Erlang/lfe_scan.xrl diff --git a/samples/Erlang/lfe_scan.xrl b/samples/Erlang/lfe_scan.xrl new file mode 100644 index 00000000..72bb1b22 --- /dev/null +++ b/samples/Erlang/lfe_scan.xrl @@ -0,0 +1,256 @@ +%% Copyright (c) 2008-2013 Robert Virding +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. + +%% File : lfe_scan.xrl +%% Author : Robert Virding +%% Purpose : Token definitions for Lisp Flavoured Erlang. + +Definitions. +B = [01] +O = [0-7] +D = [0-9] +H = [0-9a-fA-F] +B36 = [0-9a-zA-Z] +U = [A-Z] +L = [a-z] +A = ({U}|{L}) +DEL = [][()}{";\000-\s] +SYM = [^][()}{";\000-\s\177-\237] +SSYM = [^][()}{"|;#`',\000-\s\177-\237] +WS = ([\000-\s]|;[^\n]*) + +Rules. +%% Bracketed Comments using #| foo |# +#{D}*\|[^\|]*\|+([^#\|][^\|]*\|+)*# : + block_comment(string:substr(TokenChars, 3)). + +%% Separators +' : {token,{'\'',TokenLine}}. +` : {token,{'`',TokenLine}}. +, : {token,{',',TokenLine}}. +,@ : {token,{',@',TokenLine}}. +\. : {token,{'.',TokenLine}}. +[][()}{] : {token,{list_to_atom(TokenChars),TokenLine}}. + +#{D}*[bB]\( : {token,{'#B(',TokenLine}}. +#{D}*[mM]\( : {token,{'#M(',TokenLine}}. +#{D}*\( : {token,{'#(',TokenLine}}. +#{D}*\. : {token,{'#.',TokenLine}}. + +#{D}*` : {token,{'#`',TokenLine}}. +#{D}*; : {token,{'#;',TokenLine}}. +#{D}*, : {token,{'#,',TokenLine}}. +#{D}*,@ : {token,{'#,@',TokenLine}}. + +%% Characters +#{D}*\\(x{H}+|.) : char_token(skip_past(TokenChars, $\\, $\\), TokenLine). + +%% Based numbers +#{D}*\*{SYM}+ : base_token(skip_past(TokenChars, $*, $*), 2, TokenLine). +#{D}*[bB]{SYM}+ : base_token(skip_past(TokenChars, $b, $B), 2, TokenLine). +#{D}*[oO]{SYM}+ : base_token(skip_past(TokenChars, $o, $O), 8, TokenLine). +#{D}*[dD]{SYM}+ : base_token(skip_past(TokenChars, $d, $D), 10, TokenLine). +#{D}*[xX]{SYM}+ : base_token(skip_past(TokenChars, $x, $X), 16, TokenLine). 
+#{D}*[rR]{SYM}+ : + %% Scan over digit chars to get base. + {Base,[_|Ds]} = base1(tl(TokenChars), 10, 0), + base_token(Ds, Base, TokenLine). + +%% String +"(\\x{H}+;|\\.|[^"\\])*" : + %% Strip quotes. + S = string:substr(TokenChars, 2, TokenLen - 2), + {token,{string,TokenLine,chars(S)}}. +%% Binary string +#"(\\x{H}+;|\\.|[^"\\])*" : + %% Strip quotes. + S = string:substr(TokenChars, 3, TokenLen - 3), + Bin = unicode:characters_to_binary(chars(S), utf8, utf8), + {token,{binary,TokenLine,Bin}}. +%% Symbols +\|(\\x{H}+;|\\.|[^|\\])*\| : + %% Strip quotes. + S = string:substr(TokenChars, 2, TokenLen - 2), + symbol_token(chars(S), TokenLine). +%% Funs +#'{SSYM}{SYM}*/{D}+ : + %% Strip sharpsign single-quote. + FunStr = string:substr(TokenChars,3), + {token,{'#\'',TokenLine,FunStr}}. +%% Atoms +[+-]?{D}+ : + case catch {ok,list_to_integer(TokenChars)} of + {ok,I} -> {token,{number,TokenLine,I}}; + _ -> {error,"illegal integer"} + end. +[+-]?{D}+\.{D}+([eE][+-]?{D}+)? : + case catch {ok,list_to_float(TokenChars)} of + {ok,F} -> {token,{number,TokenLine,F}}; + _ -> {error,"illegal float"} + end. +{SSYM}{SYM}* : + symbol_token(TokenChars, TokenLine). +{WS}+ : skip_token. + +Erlang code. +%% Copyright (c) 2008-2013 Robert Virding +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. + +%% File : lfe_scan.erl +%% Author : Robert Virding +%% Purpose : Token definitions for Lisp Flavoured Erlang. + +-export([start_symbol_char/1,symbol_char/1]). 
+ +-import(string, [substr/2,substr/3]). + +%% start_symbol_char(Char) -> true | false. +%% symbol_char(Char) -> true | false. +%% Define start symbol chars and symbol chars. + +start_symbol_char($#) -> false; +start_symbol_char($`) -> false; +start_symbol_char($') -> false; %' +start_symbol_char($,) -> false; +start_symbol_char($|) -> false; %Symbol quote character +start_symbol_char(C) -> symbol_char(C). + +symbol_char($() -> false; +symbol_char($)) -> false; +symbol_char($[) -> false; +symbol_char($]) -> false; +symbol_char(${) -> false; +symbol_char($}) -> false; +symbol_char($") -> false; +symbol_char($;) -> false; +symbol_char(C) -> ((C > $\s) and (C =< $~)) orelse (C > $\240). + +%% symbol_token(Chars, Line) -> {token,{symbol,Line,Symbol}} | {error,E}. +%% Build a symbol from list of legal characters, else error. + +symbol_token(Cs, L) -> + case catch {ok,list_to_atom(Cs)} of + {ok,S} -> {token,{symbol,L,S}}; + _ -> {error,"illegal symbol"} + end. + +%% base_token(Chars, Base, Line) -> Integer. +%% Convert a string of Base characters into a number. We only allow +%% base between 2 and 36, and an optional sign character first. + +base_token(_, B, _) when B < 2; B > 36 -> + {error,"illegal number base"}; +base_token([$+|Cs], B, L) -> base_token(Cs, B, +1, L); +base_token([$-|Cs], B, L) -> base_token(Cs, B, -1, L); +base_token(Cs, B, L) -> base_token(Cs, B, +1, L). + +base_token(Cs, B, S, L) -> + case base1(Cs, B, 0) of + {N,[]} -> {token,{number,L,S*N}}; + {_,_} -> {error,"illegal based number"} + end. 
+ +base1([C|Cs], Base, SoFar) when C >= $0, C =< $9, C < Base + $0 -> + Next = SoFar * Base + (C - $0), + base1(Cs, Base, Next); +base1([C|Cs], Base, SoFar) when C >= $a, C =< $z, C < Base + $a - 10 -> + Next = SoFar * Base + (C - $a + 10), + base1(Cs, Base, Next); +base1([C|Cs], Base, SoFar) when C >= $A, C =< $Z, C < Base + $A - 10 -> + Next = SoFar * Base + (C - $A + 10), + base1(Cs, Base, Next); +base1([C|Cs], _Base, SoFar) -> {SoFar,[C|Cs]}; +base1([], _Base, N) -> {N,[]}. + +-define(IS_UNICODE(C), ((C >= 0) and (C =< 16#10FFFF))). + +%% char_token(InputChars, Line) -> {token,{number,L,N}} | {error,E}. +%% Convert an input string into the corresponding character. For a +%% sequence of hex characters we check the resultant code is in the +%% unicode range. + +char_token([$x,C|Cs], L) -> + case base1([C|Cs], 16, 0) of + {N,[]} when ?IS_UNICODE(N) -> {token,{number,L,N}}; + _ -> {error,"illegal character"} + end; +char_token([C], L) -> {token,{number,L,C}}. + +%% chars(InputChars) -> Chars. +%% Convert an input string into the corresponding string characters. +%% We know that the input string is correct. + +chars([$\\,$x,C|Cs0]) -> + case hex_char(C) of + true -> + case base1([C|Cs0], 16, 0) of + {N,[$;|Cs1]} -> [N|chars(Cs1)]; + _Other -> [escape_char($x)|chars([C|Cs0])] + end; + false -> [escape_char($x)|chars([C|Cs0])] + end; +chars([$\\,C|Cs]) -> [escape_char(C)|chars(Cs)]; +chars([C|Cs]) -> [C|chars(Cs)]; +chars([]) -> []. + +hex_char(C) when C >= $0, C =< $9 -> true; +hex_char(C) when C >= $a, C =< $f -> true; +hex_char(C) when C >= $A, C =< $F -> true; +hex_char(_) -> false. + +escape_char($b) -> $\b; %\b = BS +escape_char($t) -> $\t; %\t = TAB +escape_char($n) -> $\n; %\n = LF +escape_char($v) -> $\v; %\v = VT +escape_char($f) -> $\f; %\f = FF +escape_char($r) -> $\r; %\r = CR +escape_char($e) -> $\e; %\e = ESC +escape_char($s) -> $\s; %\s = SPC +escape_char($d) -> $\d; %\d = DEL +escape_char(C) -> C. 
+ +%% Block Comment: +%% Provide a sensible error when people attempt to include nested +%% comments because currently the parser cannot process them without +%% a rebuild. But simply exploding on a '#|' is not going to be that +%% helpful. + +block_comment(TokenChars) -> + %% Check we're not opening another comment block. + case string:str(TokenChars, "#|") of + 0 -> skip_token; %% No nesting found + _ -> {error, "illegal nested block comment"} + end. + +%% skip_until(String, Char1, Char2) -> String. +%% skip_past(String, Char1, Char2) -> String. + +%% skip_until([C|_]=Cs, C1, C2) when C =:= C1 ; C =:= C2 -> Cs; +%% skip_until([_|Cs], C1, C2) -> skip_until(Cs, C1, C2); +%% skip_until([], _, _) -> []. + +skip_past([C|Cs], C1, C2) when C =:= C1 ; C =:= C2 -> Cs; +skip_past([_|Cs], C1, C2) -> skip_past(Cs, C1, C2); +skip_past([], _, _) -> []. From 71f124faa5634cdc45b7c170aa21061bc3532b70 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sun, 10 Jan 2016 10:10:41 +0100 Subject: [PATCH 45/68] HTTPS links for submodules When cloning the repository with all its submodules, users without an SSH key registered will fail to clone submodules with an SSH link --- .gitmodules | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index 587ae6cd..e3dfa2a7 100644 --- a/.gitmodules +++ b/.gitmodules @@ -676,7 +676,7 @@ url = https://github.com/CausalityLtd/sublime-pony [submodule "vendor/grammars/X10"] path = vendor/grammars/X10 - url = git@github.com:x10-lang/x10-highlighting.git + url = https://github.com/x10-lang/x10-highlighting [submodule "vendor/grammars/language-babel"] path = vendor/grammars/language-babel url = https://github.com/gandm/language-babel @@ -691,7 +691,7 @@ url = https://github.com/freemarker/FreeMarker.tmbundle [submodule "vendor/grammars/MagicPython"] path = vendor/grammars/MagicPython - url = git@github.com:MagicStack/MagicPython.git + url = https://github.com/MagicStack/MagicPython [submodule 
"vendor/grammars/language-click"] path = vendor/grammars/language-click url = https://github.com/stenverbois/language-click.git @@ -706,4 +706,4 @@ url = https://github.com/erkyrath/language-inform7 [submodule "vendor/grammars/atom-language-stan"] path = vendor/grammars/atom-language-stan - url = git@github.com:jrnold/atom-language-stan.git + url = https://github.com/jrnold/atom-language-stan From 0b9897db1f83248b5252536ba4cd5cc5a804a375 Mon Sep 17 00:00:00 2001 From: Robert Clipsham Date: Mon, 11 Jan 2016 19:15:01 +0000 Subject: [PATCH 46/68] Support .rs.in as a file extension for Rust files. When using syntax extensions in stable or beta Rust channels using the syntex package, it is common to use the file extension .rs.in for the source file, and .rs for the generated file. --- lib/linguist/languages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index fe3ed9aa..a94d48e8 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3081,6 +3081,7 @@ Rust: color: "#dea584" extensions: - .rs + - .rs.in ace_mode: rust SAS: From 49254f1f749b7fb21fea5d65c39f429e3756a655 Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Fri, 15 Jan 2016 09:19:24 -0500 Subject: [PATCH 47/68] Grammar update --- vendor/grammars/AutoHotkey | 2 +- vendor/grammars/SublimePapyrus | 2 +- vendor/grammars/factor | 2 +- vendor/grammars/html.tmbundle | 2 +- vendor/grammars/language-babel | 2 +- vendor/grammars/language-gfm | 2 +- vendor/grammars/language-inform7 | 2 +- vendor/grammars/language-python | 2 +- vendor/grammars/latex.tmbundle | 2 +- vendor/grammars/sql.tmbundle | 2 +- vendor/grammars/sublime-typescript | 2 +- vendor/grammars/swift.tmbundle | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/vendor/grammars/AutoHotkey b/vendor/grammars/AutoHotkey index 77b8f2d7..4da62de2 160000 --- a/vendor/grammars/AutoHotkey +++ b/vendor/grammars/AutoHotkey @@ -1 +1 @@ -Subproject commit 
77b8f2d7857e9251e3a6b9047f3eca5f76f0be43 +Subproject commit 4da62de23dc705bf9b95e76cf5e8e51aa1e68fea diff --git a/vendor/grammars/SublimePapyrus b/vendor/grammars/SublimePapyrus index 75d567dc..293a45f6 160000 --- a/vendor/grammars/SublimePapyrus +++ b/vendor/grammars/SublimePapyrus @@ -1 +1 @@ -Subproject commit 75d567dc3a890acbc6edebedad2879b0af426766 +Subproject commit 293a45f665de3fb4e1eaaf37bf152b3861a70d7a diff --git a/vendor/grammars/factor b/vendor/grammars/factor index 7b289493..fffb5014 160000 --- a/vendor/grammars/factor +++ b/vendor/grammars/factor @@ -1 +1 @@ -Subproject commit 7b289493316955db1649eb2745a2db7a78a4c9f9 +Subproject commit fffb501486083871377237b9320159de140a943a diff --git a/vendor/grammars/html.tmbundle b/vendor/grammars/html.tmbundle index 36b90bc1..58640721 160000 --- a/vendor/grammars/html.tmbundle +++ b/vendor/grammars/html.tmbundle @@ -1 +1 @@ -Subproject commit 36b90bc113cdd59dcaeb0885103f9b6a773939e1 +Subproject commit 58640721ca0ea5210786086940f789c299366791 diff --git a/vendor/grammars/language-babel b/vendor/grammars/language-babel index 20c649bc..e63e3e29 160000 --- a/vendor/grammars/language-babel +++ b/vendor/grammars/language-babel @@ -1 +1 @@ -Subproject commit 20c649bcc79b9fd9a72e4b6e1fa11e14d553a9d8 +Subproject commit e63e3e29e93327e1d0979e3acb37a0e2acc463c0 diff --git a/vendor/grammars/language-gfm b/vendor/grammars/language-gfm index e5b24d57..6b60f7d8 160000 --- a/vendor/grammars/language-gfm +++ b/vendor/grammars/language-gfm @@ -1 +1 @@ -Subproject commit e5b24d57e73463f747fae061d6e9d59a74b9f927 +Subproject commit 6b60f7d89bb7afc67640d46eb8a7cc83e266b30d diff --git a/vendor/grammars/language-inform7 b/vendor/grammars/language-inform7 index b953a1ef..857864cd 160000 --- a/vendor/grammars/language-inform7 +++ b/vendor/grammars/language-inform7 @@ -1 +1 @@ -Subproject commit b953a1efedcff21091ba3b7e7fbcd1040c1f02bb +Subproject commit 857864cd812279672284f424da4f65e3c81b6c87 diff --git a/vendor/grammars/language-python 
b/vendor/grammars/language-python index cdb699e7..75f0d2b0 160000 --- a/vendor/grammars/language-python +++ b/vendor/grammars/language-python @@ -1 +1 @@ -Subproject commit cdb699e7a86fd9f9f84ae561abddb696aad777aa +Subproject commit 75f0d2b06122a51db6e8e0b129b57585cd68f99c diff --git a/vendor/grammars/latex.tmbundle b/vendor/grammars/latex.tmbundle index 3be8cd92..ed94d309 160000 --- a/vendor/grammars/latex.tmbundle +++ b/vendor/grammars/latex.tmbundle @@ -1 +1 @@ -Subproject commit 3be8cd9208fb2aa5e9fe4ebd0074f55433bca7e0 +Subproject commit ed94d3091501cfc6d9aa293ea241d1c0ad6c6395 diff --git a/vendor/grammars/sql.tmbundle b/vendor/grammars/sql.tmbundle index 1fd3f03f..6d4edbc1 160000 --- a/vendor/grammars/sql.tmbundle +++ b/vendor/grammars/sql.tmbundle @@ -1 +1 @@ -Subproject commit 1fd3f03f97e37fde2c7f2dd4c11728a19242d320 +Subproject commit 6d4edbc113d3272f7c097d6b1504624289ee2bc5 diff --git a/vendor/grammars/sublime-typescript b/vendor/grammars/sublime-typescript index d2c76b8c..0504a558 160000 --- a/vendor/grammars/sublime-typescript +++ b/vendor/grammars/sublime-typescript @@ -1 +1 @@ -Subproject commit d2c76b8c8262c9a848d4814662a0c8e13d1e2a6a +Subproject commit 0504a5582b6e5d814630259c87f2d8f8e6027c47 diff --git a/vendor/grammars/swift.tmbundle b/vendor/grammars/swift.tmbundle index e6375f4d..f5ce5833 160000 --- a/vendor/grammars/swift.tmbundle +++ b/vendor/grammars/swift.tmbundle @@ -1 +1 @@ -Subproject commit e6375f4dbb9c5a715e1924b1e9fc1056f0b3dee7 +Subproject commit f5ce5833a914f71aa1ab7df0a262104acd3436ef From 77dfb19a5017dcff22d223f922f7dc193031aa1a Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Fri, 15 Jan 2016 09:21:43 -0500 Subject: [PATCH 48/68] Bumping to v4.7.4 --- lib/linguist/version.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/linguist/version.rb b/lib/linguist/version.rb index b389c1c9..ba44bb5b 100644 --- a/lib/linguist/version.rb +++ b/lib/linguist/version.rb @@ -1,3 +1,3 @@ module Linguist - VERSION = "4.7.3" + 
VERSION = "4.7.4" end From d8666e53093f84e3cb0d9b253005b15081e5a7ab Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sat, 16 Jan 2016 11:21:26 +0100 Subject: [PATCH 49/68] Heuristic for Pod vs. Perl .pod files classified as Pod if they contain any Pod syntax --- lib/linguist/heuristics.rb | 8 ++++++++ samples/Perl/Sample.pod | 10 ++++++++++ samples/{Perl => Pod}/PSGI.pod | 0 test/test_heuristics.rb | 8 ++++++++ 4 files changed, 26 insertions(+) create mode 100644 samples/Perl/Sample.pod rename samples/{Perl => Pod}/PSGI.pod (100%) diff --git a/lib/linguist/heuristics.rb b/lib/linguist/heuristics.rb index 01be4e33..0806ce1f 100644 --- a/lib/linguist/heuristics.rb +++ b/lib/linguist/heuristics.rb @@ -293,6 +293,14 @@ module Linguist end end + disambiguate ".pod" do |data| + if /^=\w+$/.match(data) + Language["Pod"] + else + Language["Perl"] + end + end + disambiguate ".pro" do |data| if /^[^#]+:-/.match(data) Language["Prolog"] diff --git a/samples/Perl/Sample.pod b/samples/Perl/Sample.pod new file mode 100644 index 00000000..00fa0b99 --- /dev/null +++ b/samples/Perl/Sample.pod @@ -0,0 +1,10 @@ +use strict; +use warnings; +package DZT::Sample; + +sub return_arrayref_of_values_passed { + my $invocant = shift; + return \@_; +} + +1; diff --git a/samples/Perl/PSGI.pod b/samples/Pod/PSGI.pod similarity index 100% rename from samples/Perl/PSGI.pod rename to samples/Pod/PSGI.pod diff --git a/test/test_heuristics.rb b/test/test_heuristics.rb index 524a522a..702320e9 100644 --- a/test/test_heuristics.rb +++ b/test/test_heuristics.rb @@ -155,6 +155,14 @@ class TestHeuristcs < Minitest::Test }) end + # Candidate languages = ["Pod", "Perl"] + def test_pod_by_heuristics + assert_heuristics({ + "Perl" => all_fixtures("Perl", "*.pod"), + "Pod" => all_fixtures("Pod", "*.pod") + }) + end + # Candidate languages = ["IDL", "Prolog", "QMake", "INI"] def test_pro_by_heuristics assert_heuristics({ From d87fad649c32be985a6a009bf117ad26e7314daf Mon Sep 17 00:00:00 2001 From: chrisarcand 
Date: Sat, 16 Jan 2016 07:50:55 -0500 Subject: [PATCH 50/68] Improved vim modeline detection TLDR: This greatly increases the flexibility of vim modeline detection to manually set the language of a file. In vim there are two forms of modelines: [text]{white}{vi:|vim:|ex:}[white]{options} examples: 'vim: syntax=perl', 'ex: filetype=ruby' -and- [text]{white}{vi:|vim:|Vim:|ex:}[white]se[t] {options}:[text] examples: 'vim set syntax=perl:', 'Vim: se ft=ruby:' As you can see, there are many combinations. These changes should allow most combinations to be used. The two most important additions are the use of the keyword 'syntax', as well as the addition of the first form (you now no longer need to use the keyword 'set' with a colon at the end). The use of first form with 'syntax' is very, very common across GitHub: https://github.com/search?l=ruby&q=vim%3A+syntax%3D&ref=searchresults&type=Code&utf8=%E2%9C%93 --- README.md | 3 +++ lib/linguist/strategy/modeline.rb | 17 ++++++++++++++--- test/fixtures/Data/Modelines/ruby4 | 3 +++ test/fixtures/Data/Modelines/ruby5 | 3 +++ test/fixtures/Data/Modelines/ruby6 | 3 +++ test/fixtures/Data/Modelines/ruby7 | 3 +++ test/fixtures/Data/Modelines/ruby8 | 3 +++ test/fixtures/Data/Modelines/ruby9 | 3 +++ test/test_modelines.rb | 6 ++++++ 9 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 test/fixtures/Data/Modelines/ruby4 create mode 100644 test/fixtures/Data/Modelines/ruby5 create mode 100644 test/fixtures/Data/Modelines/ruby6 create mode 100644 test/fixtures/Data/Modelines/ruby7 create mode 100644 test/fixtures/Data/Modelines/ruby8 create mode 100644 test/fixtures/Data/Modelines/ruby9 diff --git a/README.md b/README.md index 36bcbd50..2d661ef9 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,9 @@ Alternatively, you can use Vim or Emacs style modelines to set the language for ##### Vim ``` +# Some examples of various styles: +vim: syntax=java +vim: set syntax=ruby: vim: set filetype=prolog: vim: set ft=cpp: ``` 
diff --git a/lib/linguist/strategy/modeline.rb b/lib/linguist/strategy/modeline.rb index f995d940..ece98a1f 100644 --- a/lib/linguist/strategy/modeline.rb +++ b/lib/linguist/strategy/modeline.rb @@ -1,8 +1,19 @@ module Linguist module Strategy class Modeline - EmacsModeline = /-\*-\s*(?:(?!mode)[\w-]+\s*:\s*(?:[\w+-]+)\s*;?\s*)*(?:mode\s*:)?\s*([\w+-]+)\s*(?:;\s*(?!mode)[\w-]+\s*:\s*[\w+-]+\s*)*;?\s*-\*-/i - VimModeline = /vim:\s*set.*\s(?:ft|filetype)=(\w+)\s?.*:/i + EMACS_MODELINE = /-\*-\s*(?:(?!mode)[\w-]+\s*:\s*(?:[\w+-]+)\s*;?\s*)*(?:mode\s*:)?\s*([\w+-]+)\s*(?:;\s*(?!mode)[\w-]+\s*:\s*[\w+-]+\s*)*;?\s*-\*-/i + + # First form vim modeline + # [text]{white}{vi:|vim:|ex:}[white]{options} + # ex: 'vim: syntax=perl' + VIM_MODELINE_1 = /(?:vim|vi|ex):\s*(?:ft|filetype|syntax)=(\w+)\s?/i + + # Second form vim modeline (compatible with some versions of Vi) + # [text]{white}{vi:|vim:|Vim:|ex:}[white]se[t] {options}:[text] + # ex: 'vim set syntax=perl:' + VIM_MODELINE_2 = /(?:vim|vi|Vim|ex):\s*se(?:t)?.*\s(?:ft|filetype|syntax)=(\w+)\s?.*:/i + + MODELINES = [EMACS_MODELINE, VIM_MODELINE_1, VIM_MODELINE_2] # Public: Detects language based on Vim and Emacs modelines # @@ -22,7 +33,7 @@ module Linguist # # Returns a String or nil def self.modeline(data) - match = data.match(EmacsModeline) || data.match(VimModeline) + match = MODELINES.map { |regex| data.match(regex) }.reject(&:nil?).first match[1] if match end end diff --git a/test/fixtures/Data/Modelines/ruby4 b/test/fixtures/Data/Modelines/ruby4 new file mode 100644 index 00000000..e3b50151 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby4 @@ -0,0 +1,3 @@ +# vim: filetype=ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby5 b/test/fixtures/Data/Modelines/ruby5 new file mode 100644 index 00000000..10349050 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby5 @@ -0,0 +1,3 @@ +# vim: ft=ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby6 b/test/fixtures/Data/Modelines/ruby6 new file 
mode 100644 index 00000000..a2b49dae --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby6 @@ -0,0 +1,3 @@ +# vim: syntax=Ruby + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby7 b/test/fixtures/Data/Modelines/ruby7 new file mode 100644 index 00000000..1ed5b28f --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby7 @@ -0,0 +1,3 @@ +# vim: se syntax=ruby: + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby8 b/test/fixtures/Data/Modelines/ruby8 new file mode 100644 index 00000000..8e854741 --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby8 @@ -0,0 +1,3 @@ +# vim: set syntax=ruby: + +# I am Ruby diff --git a/test/fixtures/Data/Modelines/ruby9 b/test/fixtures/Data/Modelines/ruby9 new file mode 100644 index 00000000..ac82358d --- /dev/null +++ b/test/fixtures/Data/Modelines/ruby9 @@ -0,0 +1,3 @@ +# ex: syntax=ruby + +# I am Ruby diff --git a/test/test_modelines.rb b/test/test_modelines.rb index fd259782..85718955 100644 --- a/test/test_modelines.rb +++ b/test/test_modelines.rb @@ -11,6 +11,12 @@ class TestModelines < Minitest::Test assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby") assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby2") assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby3") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby4") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby5") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby6") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby7") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby8") + assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby9") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplus") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs1") assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs2") From 
52938f6dbf65d317c5074933319f2309e2557d99 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sun, 17 Jan 2016 18:14:51 +0100 Subject: [PATCH 51/68] Test submodules are using HTTPS links --- test/test_grammars.rb | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/test/test_grammars.rb b/test/test_grammars.rb index a2ad1e53..29211cc2 100644 --- a/test/test_grammars.rb +++ b/test/test_grammars.rb @@ -113,6 +113,20 @@ class TestGrammars < Minitest::Test assert_equal [], licensed, msg end + def test_submodules_use_https_links + File.open(".gitmodules", "r") do |fh| + ssh_submodules = [] + fh.each_line do |line| + if matches = line.match(/url = (git@.*)/) + submodule_link = matches.captures[0] + ssh_submodules.push(submodule_link) + end + end + msg = "The following submodules don't have an HTTPS link:\n* #{ssh_submodules.join("\n* ")}" + assert_equal [], ssh_submodules, msg + end + end + private def submodule_paths From df88de14e3135915181370d8c421be7cc73be8ef Mon Sep 17 00:00:00 2001 From: James Ko Date: Wed, 20 Jan 2016 19:01:19 -0500 Subject: [PATCH 52/68] Add .xproj to list of XML extensions --- lib/linguist/languages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index fe3ed9aa..c4fb0a4c 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3797,6 +3797,7 @@ XML: - .xliff - .xmi - .xml.dist + - .xproj - .xsd - .xul - .zcml From 721e5b4656571000b8a184f53ddc62fd29e9f8ac Mon Sep 17 00:00:00 2001 From: Abigail Date: Thu, 28 Jan 2016 11:41:35 +0000 Subject: [PATCH 53/68] Add detection of GrammarKit-generated files GrammarKit is a plugin by JetBrains for creating custom language plugins for JetBrains IDEs (such as IntelliJ, RubyMine, CLion and more). It defines a BNF parser language which can be used to generate a parser in Java, and it also integrates JFLex for generating a lexer in Java. 
Both of these generated Java files can be recognised by a comment on the first line of the file, and so classifying them as generated is trivial. --- lib/linguist/generated.rb | 31 +- samples/Java/GrammarKit.java | 625 +++++++++++++++++++++++++++++++++++ samples/Java/JFlexLexer.java | 482 +++++++++++++++++++++++++++ test/test_generated.rb | 6 + 4 files changed, 1143 insertions(+), 1 deletion(-) create mode 100644 samples/Java/GrammarKit.java create mode 100644 samples/Java/JFlexLexer.java diff --git a/lib/linguist/generated.rb b/lib/linguist/generated.rb index 35766e4d..7747406f 100644 --- a/lib/linguist/generated.rb +++ b/lib/linguist/generated.rb @@ -72,7 +72,9 @@ module Linguist vcr_cassette? || generated_module? || generated_unity3d_meta? || - generated_racc? + generated_racc? || + generated_jflex? || + generated_grammarkit? end # Internal: Is the blob an Xcode file? @@ -373,5 +375,32 @@ module Linguist return false unless lines.count > 2 return lines[2].start_with?("# This file is automatically generated by Racc") end + + # Internal: Is this a JFlex-generated file? + # + # A JFlex-generated file contains: + # /* The following code was generated by JFlex x.y.z on d/at/e ti:me */ + # on the first line. + # + # Return true or false + def generated_jflex? + return false unless extname == '.java' + return false unless lines.count > 1 + return lines[0].start_with?("/* The following code was generated by JFlex ") + end + + # Internal: Is this a GrammarKit-generated file? + # + # A GrammarKit-generated file typically contain: + # // This is a generated file. Not intended for manual editing. + # on the first line. This is not always the case, as it's possible to + # customize the class header. + # + # Return true or false + def generated_grammarkit? + return false unless extname == '.java' + return false unless lines.count > 1 + return lines[0].start_with?("// This is a generated file. 
Not intended for manual editing.") + end end end diff --git a/samples/Java/GrammarKit.java b/samples/Java/GrammarKit.java new file mode 100644 index 00000000..a5db6da8 --- /dev/null +++ b/samples/Java/GrammarKit.java @@ -0,0 +1,625 @@ +// This is a generated file. Not intended for manual editing. +package org.intellij.grammar.parser; + +import com.intellij.lang.PsiBuilder; +import com.intellij.lang.PsiBuilder.Marker; +import static org.intellij.grammar.psi.BnfTypes.*; +import static org.intellij.grammar.parser.GeneratedParserUtilBase.*; +import com.intellij.psi.tree.IElementType; +import com.intellij.lang.ASTNode; +import com.intellij.psi.tree.TokenSet; +import com.intellij.lang.PsiParser; +import com.intellij.lang.LightPsiParser; + +@SuppressWarnings({"SimplifiableIfStatement", "UnusedAssignment"}) +public class GrammarParser implements PsiParser, LightPsiParser { + + public ASTNode parse(IElementType t, PsiBuilder b) { + parseLight(t, b); + return b.getTreeBuilt(); + } + + public void parseLight(IElementType t, PsiBuilder b) { + boolean r; + b = adapt_builder_(t, b, this, EXTENDS_SETS_); + Marker m = enter_section_(b, 0, _COLLAPSE_, null); + if (t == BNF_ATTR) { + r = attr(b, 0); + } + else if (t == BNF_ATTR_PATTERN) { + r = attr_pattern(b, 0); + } + else if (t == BNF_ATTR_VALUE) { + r = attr_value(b, 0); + } + else if (t == BNF_ATTRS) { + r = attrs(b, 0); + } + else if (t == BNF_CHOICE) { + r = choice(b, 0); + } + else if (t == BNF_EXPRESSION) { + r = expression(b, 0); + } + else if (t == BNF_LITERAL_EXPRESSION) { + r = literal_expression(b, 0); + } + else if (t == BNF_MODIFIER) { + r = modifier(b, 0); + } + else if (t == BNF_PAREN_EXPRESSION) { + r = paren_expression(b, 0); + } + else if (t == BNF_PREDICATE) { + r = predicate(b, 0); + } + else if (t == BNF_PREDICATE_SIGN) { + r = predicate_sign(b, 0); + } + else if (t == BNF_QUANTIFIED) { + r = quantified(b, 0); + } + else if (t == BNF_QUANTIFIER) { + r = quantifier(b, 0); + } + else if (t == 
BNF_REFERENCE_OR_TOKEN) { + r = reference_or_token(b, 0); + } + else if (t == BNF_RULE) { + r = rule(b, 0); + } + else if (t == BNF_SEQUENCE) { + r = sequence(b, 0); + } + else if (t == BNF_STRING_LITERAL_EXPRESSION) { + r = string_literal_expression(b, 0); + } + else { + r = parse_root_(t, b, 0); + } + exit_section_(b, 0, m, t, r, true, TRUE_CONDITION); + } + + protected boolean parse_root_(IElementType t, PsiBuilder b, int l) { + return grammar(b, l + 1); + } + + public static final TokenSet[] EXTENDS_SETS_ = new TokenSet[] { + create_token_set_(BNF_LITERAL_EXPRESSION, BNF_STRING_LITERAL_EXPRESSION), + create_token_set_(BNF_CHOICE, BNF_EXPRESSION, BNF_LITERAL_EXPRESSION, BNF_PAREN_EXPRESSION, + BNF_PREDICATE, BNF_QUANTIFIED, BNF_REFERENCE_OR_TOKEN, BNF_SEQUENCE, + BNF_STRING_LITERAL_EXPRESSION), + }; + + /* ********************************************************** */ + // id attr_pattern? '=' attr_value ';'? + public static boolean attr(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr")) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_ID); + p = r; // pin = 1 + r = r && report_error_(b, attr_1(b, l + 1)); + r = p && report_error_(b, consumeToken(b, BNF_OP_EQ)) && r; + r = p && report_error_(b, attr_value(b, l + 1)) && r; + r = p && attr_4(b, l + 1) && r; + exit_section_(b, l, m, BNF_ATTR, r, p, attr_recover_until_parser_); + return r || p; + } + + // attr_pattern? + private static boolean attr_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_1")) return false; + attr_pattern(b, l + 1); + return true; + } + + // ';'? 
+ private static boolean attr_4(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_4")) return false; + consumeToken(b, BNF_SEMICOLON); + return true; + } + + /* ********************************************************** */ + // '(' string ')' + public static boolean attr_pattern(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_pattern")) return false; + if (!nextTokenIs(b, BNF_LEFT_PAREN)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_PAREN); + r = r && consumeToken(b, BNF_STRING); + r = r && consumeToken(b, BNF_RIGHT_PAREN); + exit_section_(b, m, BNF_ATTR_PATTERN, r); + return r; + } + + /* ********************************************************** */ + // !'}' + static boolean attr_recover_until(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_recover_until")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !consumeToken(b, BNF_RIGHT_BRACE); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // (reference_or_token | literal_expression) !'=' + public static boolean attr_value(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = attr_value_0(b, l + 1); + r = r && attr_value_1(b, l + 1); + exit_section_(b, l, m, BNF_ATTR_VALUE, r, false, null); + return r; + } + + // reference_or_token | literal_expression + private static boolean attr_value_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = reference_or_token(b, l + 1); + if (!r) r = literal_expression(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !'=' + private static boolean attr_value_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attr_value_1")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); 
+ r = !consumeToken(b, BNF_OP_EQ); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // '{' attr* '}' + public static boolean attrs(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attrs")) return false; + if (!nextTokenIs(b, BNF_LEFT_BRACE)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_LEFT_BRACE); + p = r; // pin = 1 + r = r && report_error_(b, attrs_1(b, l + 1)); + r = p && consumeToken(b, BNF_RIGHT_BRACE) && r; + exit_section_(b, l, m, BNF_ATTRS, r, p, null); + return r || p; + } + + // attr* + private static boolean attrs_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "attrs_1")) return false; + int c = current_position_(b); + while (true) { + if (!attr(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "attrs_1", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // '{' sequence ('|' sequence)* '}' | sequence choice_tail* + public static boolean choice(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = choice_0(b, l + 1); + if (!r) r = choice_1(b, l + 1); + exit_section_(b, l, m, BNF_CHOICE, r, false, null); + return r; + } + + // '{' sequence ('|' sequence)* '}' + private static boolean choice_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_BRACE); + r = r && sequence(b, l + 1); + r = r && choice_0_2(b, l + 1); + r = r && consumeToken(b, BNF_RIGHT_BRACE); + exit_section_(b, m, null, r); + return r; + } + + // ('|' sequence)* + private static boolean choice_0_2(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0_2")) return false; + int c = current_position_(b); + while (true) { + if (!choice_0_2_0(b, l + 
1)) break; + if (!empty_element_parsed_guard_(b, "choice_0_2", c)) break; + c = current_position_(b); + } + return true; + } + + // '|' sequence + private static boolean choice_0_2_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_0_2_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_OP_OR); + r = r && sequence(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // sequence choice_tail* + private static boolean choice_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_1")) return false; + boolean r; + Marker m = enter_section_(b); + r = sequence(b, l + 1); + r = r && choice_1_1(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // choice_tail* + private static boolean choice_1_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_1_1")) return false; + int c = current_position_(b); + while (true) { + if (!choice_tail(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "choice_1_1", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // '|' sequence + static boolean choice_tail(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "choice_tail")) return false; + if (!nextTokenIs(b, BNF_OP_OR)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_OP_OR); + p = r; // pin = 1 + r = r && sequence(b, l + 1); + exit_section_(b, l, m, null, r, p, null); + return r || p; + } + + /* ********************************************************** */ + // choice? 
+ public static boolean expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "expression")) return false; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + choice(b, l + 1); + exit_section_(b, l, m, BNF_EXPRESSION, true, false, null); + return true; + } + + /* ********************************************************** */ + // (attrs | rule) * + static boolean grammar(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "grammar")) return false; + int c = current_position_(b); + while (true) { + if (!grammar_0(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "grammar", c)) break; + c = current_position_(b); + } + return true; + } + + // attrs | rule + private static boolean grammar_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "grammar_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = attrs(b, l + 1); + if (!r) r = rule(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + /* ********************************************************** */ + // string_literal_expression | number + public static boolean literal_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "literal_expression")) return false; + if (!nextTokenIs(b, "", BNF_NUMBER, BNF_STRING)) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = string_literal_expression(b, l + 1); + if (!r) r = consumeToken(b, BNF_NUMBER); + exit_section_(b, l, m, BNF_LITERAL_EXPRESSION, r, false, null); + return r; + } + + /* ********************************************************** */ + // 'private' | 'external' | 'wrapped' + public static boolean modifier(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "modifier")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, "private"); + if (!r) r = consumeToken(b, "external"); + if (!r) r = consumeToken(b, "wrapped"); + exit_section_(b, l, m, BNF_MODIFIER, r, false, null); + return r; + } + + /* 
********************************************************** */ + // quantified | predicate + static boolean option(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "option")) return false; + boolean r; + Marker m = enter_section_(b); + r = quantified(b, l + 1); + if (!r) r = predicate(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + /* ********************************************************** */ + // '(' expression ')' + public static boolean paren_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "paren_expression")) return false; + if (!nextTokenIs(b, BNF_LEFT_PAREN)) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, null); + r = consumeToken(b, BNF_LEFT_PAREN); + p = r; // pin = 1 + r = r && report_error_(b, expression(b, l + 1)); + r = p && consumeToken(b, BNF_RIGHT_PAREN) && r; + exit_section_(b, l, m, BNF_PAREN_EXPRESSION, r, p, null); + return r || p; + } + + /* ********************************************************** */ + // predicate_sign simple + public static boolean predicate(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "predicate")) return false; + if (!nextTokenIs(b, "", BNF_OP_NOT, BNF_OP_AND)) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = predicate_sign(b, l + 1); + r = r && simple(b, l + 1); + exit_section_(b, l, m, BNF_PREDICATE, r, false, null); + return r; + } + + /* ********************************************************** */ + // '&' | '!' 
+ public static boolean predicate_sign(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "predicate_sign")) return false; + if (!nextTokenIs(b, "", BNF_OP_NOT, BNF_OP_AND)) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_OP_AND); + if (!r) r = consumeToken(b, BNF_OP_NOT); + exit_section_(b, l, m, BNF_PREDICATE_SIGN, r, false, null); + return r; + } + + /* ********************************************************** */ + // '[' expression ']' | simple quantifier? + public static boolean quantified(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = quantified_0(b, l + 1); + if (!r) r = quantified_1(b, l + 1); + exit_section_(b, l, m, BNF_QUANTIFIED, r, false, null); + return r; + } + + // '[' expression ']' + private static boolean quantified_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_LEFT_BRACKET); + r = r && expression(b, l + 1); + r = r && consumeToken(b, BNF_RIGHT_BRACKET); + exit_section_(b, m, null, r); + return r; + } + + // simple quantifier? + private static boolean quantified_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_1")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple(b, l + 1); + r = r && quantified_1_1(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // quantifier? + private static boolean quantified_1_1(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantified_1_1")) return false; + quantifier(b, l + 1); + return true; + } + + /* ********************************************************** */ + // '?' 
| '+' | '*' + public static boolean quantifier(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "quantifier")) return false; + boolean r; + Marker m = enter_section_(b, l, _NONE_, ""); + r = consumeToken(b, BNF_OP_OPT); + if (!r) r = consumeToken(b, BNF_OP_ONEMORE); + if (!r) r = consumeToken(b, BNF_OP_ZEROMORE); + exit_section_(b, l, m, BNF_QUANTIFIER, r, false, null); + return r; + } + + /* ********************************************************** */ + // id + public static boolean reference_or_token(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "reference_or_token")) return false; + if (!nextTokenIs(b, BNF_ID)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_ID); + exit_section_(b, m, BNF_REFERENCE_OR_TOKEN, r); + return r; + } + + /* ********************************************************** */ + // modifier* id '::=' expression attrs? ';'? + public static boolean rule(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule")) return false; + boolean r, p; + Marker m = enter_section_(b, l, _NONE_, ""); + r = rule_0(b, l + 1); + r = r && consumeToken(b, BNF_ID); + r = r && consumeToken(b, BNF_OP_IS); + p = r; // pin = 3 + r = r && report_error_(b, expression(b, l + 1)); + r = p && report_error_(b, rule_4(b, l + 1)) && r; + r = p && rule_5(b, l + 1) && r; + exit_section_(b, l, m, BNF_RULE, r, p, rule_recover_until_parser_); + return r || p; + } + + // modifier* + private static boolean rule_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_0")) return false; + int c = current_position_(b); + while (true) { + if (!modifier(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "rule_0", c)) break; + c = current_position_(b); + } + return true; + } + + // attrs? + private static boolean rule_4(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_4")) return false; + attrs(b, l + 1); + return true; + } + + // ';'? 
+ private static boolean rule_5(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_5")) return false; + consumeToken(b, BNF_SEMICOLON); + return true; + } + + /* ********************************************************** */ + // !'{' + static boolean rule_recover_until(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "rule_recover_until")) return false; + boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !consumeToken(b, BNF_LEFT_BRACE); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + /* ********************************************************** */ + // option + + public static boolean sequence(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "sequence")) return false; + boolean r; + Marker m = enter_section_(b, l, _COLLAPSE_, ""); + r = option(b, l + 1); + int c = current_position_(b); + while (r) { + if (!option(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "sequence", c)) break; + c = current_position_(b); + } + exit_section_(b, l, m, BNF_SEQUENCE, r, false, null); + return r; + } + + /* ********************************************************** */ + // !(modifier* id '::=' ) reference_or_token | literal_expression | paren_expression + static boolean simple(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0(b, l + 1); + if (!r) r = literal_expression(b, l + 1); + if (!r) r = paren_expression(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !(modifier* id '::=' ) reference_or_token + private static boolean simple_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0_0(b, l + 1); + r = r && reference_or_token(b, l + 1); + exit_section_(b, m, null, r); + return r; + } + + // !(modifier* id '::=' ) + private static boolean simple_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0")) return false; + 
boolean r; + Marker m = enter_section_(b, l, _NOT_, null); + r = !simple_0_0_0(b, l + 1); + exit_section_(b, l, m, null, r, false, null); + return r; + } + + // modifier* id '::=' + private static boolean simple_0_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0_0")) return false; + boolean r; + Marker m = enter_section_(b); + r = simple_0_0_0_0(b, l + 1); + r = r && consumeToken(b, BNF_ID); + r = r && consumeToken(b, BNF_OP_IS); + exit_section_(b, m, null, r); + return r; + } + + // modifier* + private static boolean simple_0_0_0_0(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "simple_0_0_0_0")) return false; + int c = current_position_(b); + while (true) { + if (!modifier(b, l + 1)) break; + if (!empty_element_parsed_guard_(b, "simple_0_0_0_0", c)) break; + c = current_position_(b); + } + return true; + } + + /* ********************************************************** */ + // string + public static boolean string_literal_expression(PsiBuilder b, int l) { + if (!recursion_guard_(b, l, "string_literal_expression")) return false; + if (!nextTokenIs(b, BNF_STRING)) return false; + boolean r; + Marker m = enter_section_(b); + r = consumeToken(b, BNF_STRING); + exit_section_(b, m, BNF_STRING_LITERAL_EXPRESSION, r); + return r; + } + + final static Parser attr_recover_until_parser_ = new Parser() { + public boolean parse(PsiBuilder b, int l) { + return attr_recover_until(b, l + 1); + } + }; + final static Parser rule_recover_until_parser_ = new Parser() { + public boolean parse(PsiBuilder b, int l) { + return rule_recover_until(b, l + 1); + } + }; +} diff --git a/samples/Java/JFlexLexer.java b/samples/Java/JFlexLexer.java new file mode 100644 index 00000000..e54564fb --- /dev/null +++ b/samples/Java/JFlexLexer.java @@ -0,0 +1,482 @@ +/* The following code was generated by JFlex 1.4.3 on 28/01/16 11:27 */ + +package test; +import com.intellij.lexer.*; +import com.intellij.psi.tree.IElementType; +import static 
org.intellij.grammar.psi.BnfTypes.*; + + +/** + * This class is a scanner generated by + * JFlex 1.4.3 + * on 28/01/16 11:27 from the specification file + * /home/abigail/code/intellij-grammar-kit-test/src/test/_GrammarLexer.flex + */ +public class _GrammarLexer implements FlexLexer { + /** initial size of the lookahead buffer */ + private static final int ZZ_BUFFERSIZE = 16384; + + /** lexical states */ + public static final int YYINITIAL = 0; + + /** + * ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l + * ZZ_LEXSTATE[l+1] is the state in the DFA for the lexical state l + * at the beginning of a line + * l is of the form l = 2*k, k a non negative integer + */ + private static final int ZZ_LEXSTATE[] = { + 0, 0 + }; + + /** + * Translates characters to character classes + */ + private static final String ZZ_CMAP_PACKED = + "\11\0\1\1\1\1\1\0\1\1\1\1\22\0\1\1\101\0\1\13"+ + "\1\0\1\3\1\14\1\0\1\10\1\0\1\2\3\0\1\12\1\7"+ + "\3\0\1\6\1\4\1\5\1\11\uff8a\0"; + + /** + * Translates characters to character classes + */ + private static final char [] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED); + + /** + * Translates DFA states to action switch labels. 
+ */ + private static final int [] ZZ_ACTION = zzUnpackAction(); + + private static final String ZZ_ACTION_PACKED_0 = + "\1\0\1\1\1\2\3\1\1\3\10\0\1\4\1\5"; + + private static int [] zzUnpackAction() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackAction(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + do result[j++] = value; while (--count > 0); + } + return j; + } + + + /** + * Translates a state to a row index in the transition table + */ + private static final int [] ZZ_ROWMAP = zzUnpackRowMap(); + + private static final String ZZ_ROWMAP_PACKED_0 = + "\0\0\0\15\0\32\0\47\0\64\0\101\0\15\0\116"+ + "\0\133\0\150\0\165\0\202\0\217\0\234\0\251\0\15"+ + "\0\15"; + + private static int [] zzUnpackRowMap() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackRowMap(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int high = packed.charAt(i++) << 16; + result[j++] = high | packed.charAt(i++); + } + return j; + } + + /** + * The transition table of the DFA + */ + private static final int [] ZZ_TRANS = zzUnpackTrans(); + + private static final String ZZ_TRANS_PACKED_0 = + "\1\2\1\3\1\4\1\2\1\5\2\2\1\6\5\2"+ + "\16\0\1\3\16\0\1\7\16\0\1\10\20\0\1\11"+ + "\11\0\1\12\20\0\1\13\4\0\1\14\25\0\1\15"+ + "\10\0\1\16\21\0\1\17\10\0\1\20\12\0\1\21"+ + "\6\0"; + + private static int [] zzUnpackTrans() { + int [] result = new int[182]; + int offset = 0; + offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result); + return result; 
+ } + + private static int zzUnpackTrans(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + value--; + do result[j++] = value; while (--count > 0); + } + return j; + } + + + /* error codes */ + private static final int ZZ_UNKNOWN_ERROR = 0; + private static final int ZZ_NO_MATCH = 1; + private static final int ZZ_PUSHBACK_2BIG = 2; + private static final char[] EMPTY_BUFFER = new char[0]; + private static final int YYEOF = -1; + private static java.io.Reader zzReader = null; // Fake + + /* error messages for the codes above */ + private static final String ZZ_ERROR_MSG[] = { + "Unkown internal scanner error", + "Error: could not match input", + "Error: pushback value was too large" + }; + + /** + * ZZ_ATTRIBUTE[aState] contains the attributes of state aState + */ + private static final int [] ZZ_ATTRIBUTE = zzUnpackAttribute(); + + private static final String ZZ_ATTRIBUTE_PACKED_0 = + "\1\0\1\11\4\1\1\11\10\0\2\11"; + + private static int [] zzUnpackAttribute() { + int [] result = new int[17]; + int offset = 0; + offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result); + return result; + } + + private static int zzUnpackAttribute(String packed, int offset, int [] result) { + int i = 0; /* index in packed string */ + int j = offset; /* index in unpacked array */ + int l = packed.length(); + while (i < l) { + int count = packed.charAt(i++); + int value = packed.charAt(i++); + do result[j++] = value; while (--count > 0); + } + return j; + } + + /** the current state of the DFA */ + private int zzState; + + /** the current lexical state */ + private int zzLexicalState = YYINITIAL; + + /** this buffer contains the current text to be matched and is + the source of the yytext() string */ + private CharSequence zzBuffer = ""; + + /** this buffer may contains the 
current text array to be matched when it is cheap to acquire it */ + private char[] zzBufferArray; + + /** the textposition at the last accepting state */ + private int zzMarkedPos; + + /** the textposition at the last state to be included in yytext */ + private int zzPushbackPos; + + /** the current text position in the buffer */ + private int zzCurrentPos; + + /** startRead marks the beginning of the yytext() string in the buffer */ + private int zzStartRead; + + /** endRead marks the last character in the buffer, that has been read + from input */ + private int zzEndRead; + + /** + * zzAtBOL == true <=> the scanner is currently at the beginning of a line + */ + private boolean zzAtBOL = true; + + /** zzAtEOF == true <=> the scanner is at the EOF */ + private boolean zzAtEOF; + + /* user code: */ + public _GrammarLexer() { + this((java.io.Reader)null); + } + + + /** + * Creates a new scanner + * + * @param in the java.io.Reader to read input from. + */ + public _GrammarLexer(java.io.Reader in) { + this.zzReader = in; + } + + + /** + * Unpacks the compressed character translation table. 
+ * + * @param packed the packed character translation table + * @return the unpacked character translation table + */ + private static char [] zzUnpackCMap(String packed) { + char [] map = new char[0x10000]; + int i = 0; /* index in packed string */ + int j = 0; /* index in unpacked array */ + while (i < 52) { + int count = packed.charAt(i++); + char value = packed.charAt(i++); + do map[j++] = value; while (--count > 0); + } + return map; + } + + public final int getTokenStart(){ + return zzStartRead; + } + + public final int getTokenEnd(){ + return getTokenStart() + yylength(); + } + + public void reset(CharSequence buffer, int start, int end,int initialState){ + zzBuffer = buffer; + zzBufferArray = com.intellij.util.text.CharArrayUtil.fromSequenceWithoutCopying(buffer); + zzCurrentPos = zzMarkedPos = zzStartRead = start; + zzPushbackPos = 0; + zzAtEOF = false; + zzAtBOL = true; + zzEndRead = end; + yybegin(initialState); + } + + /** + * Refills the input buffer. + * + * @return false, iff there was new input. + * + * @exception java.io.IOException if any I/O-Error occurs + */ + private boolean zzRefill() throws java.io.IOException { + return true; + } + + + /** + * Returns the current lexical state. + */ + public final int yystate() { + return zzLexicalState; + } + + + /** + * Enters a new lexical state + * + * @param newState the new lexical state + */ + public final void yybegin(int newState) { + zzLexicalState = newState; + } + + + /** + * Returns the text matched by the current regular expression. + */ + public final CharSequence yytext() { + return zzBuffer.subSequence(zzStartRead, zzMarkedPos); + } + + + /** + * Returns the character at position pos from the + * matched text. + * + * It is equivalent to yytext().charAt(pos), but faster + * + * @param pos the position of the character to fetch. + * A value from 0 to yylength()-1. + * + * @return the character at position pos + */ + public final char yycharat(int pos) { + return zzBufferArray != null ? 
zzBufferArray[zzStartRead+pos]:zzBuffer.charAt(zzStartRead+pos); + } + + + /** + * Returns the length of the matched text region. + */ + public final int yylength() { + return zzMarkedPos-zzStartRead; + } + + + /** + * Reports an error that occured while scanning. + * + * In a wellformed scanner (no or only correct usage of + * yypushback(int) and a match-all fallback rule) this method + * will only be called with things that "Can't Possibly Happen". + * If this method is called, something is seriously wrong + * (e.g. a JFlex bug producing a faulty scanner etc.). + * + * Usual syntax/scanner level error handling should be done + * in error fallback rules. + * + * @param errorCode the code of the errormessage to display + */ + private void zzScanError(int errorCode) { + String message; + try { + message = ZZ_ERROR_MSG[errorCode]; + } + catch (ArrayIndexOutOfBoundsException e) { + message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR]; + } + + throw new Error(message); + } + + + /** + * Pushes the specified amount of characters back into the input stream. + * + * They will be read again by then next call of the scanning method + * + * @param number the number of characters to be read again. + * This number must not be greater than yylength()! + */ + public void yypushback(int number) { + if ( number > yylength() ) + zzScanError(ZZ_PUSHBACK_2BIG); + + zzMarkedPos -= number; + } + + + /** + * Resumes scanning until the next regular expression is matched, + * the end of input is encountered or an I/O-Error occurs. 
+ * + * @return the next token + * @exception java.io.IOException if any I/O-Error occurs + */ + public IElementType advance() throws java.io.IOException { + int zzInput; + int zzAction; + + // cached fields: + int zzCurrentPosL; + int zzMarkedPosL; + int zzEndReadL = zzEndRead; + CharSequence zzBufferL = zzBuffer; + char[] zzBufferArrayL = zzBufferArray; + char [] zzCMapL = ZZ_CMAP; + + int [] zzTransL = ZZ_TRANS; + int [] zzRowMapL = ZZ_ROWMAP; + int [] zzAttrL = ZZ_ATTRIBUTE; + + while (true) { + zzMarkedPosL = zzMarkedPos; + + zzAction = -1; + + zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL; + + zzState = ZZ_LEXSTATE[zzLexicalState]; + + + zzForAction: { + while (true) { + + if (zzCurrentPosL < zzEndReadL) + zzInput = (zzBufferArrayL != null ? zzBufferArrayL[zzCurrentPosL++] : zzBufferL.charAt(zzCurrentPosL++)); + else if (zzAtEOF) { + zzInput = YYEOF; + break zzForAction; + } + else { + // store back cached positions + zzCurrentPos = zzCurrentPosL; + zzMarkedPos = zzMarkedPosL; + boolean eof = zzRefill(); + // get translated positions and possibly new buffer + zzCurrentPosL = zzCurrentPos; + zzMarkedPosL = zzMarkedPos; + zzBufferL = zzBuffer; + zzEndReadL = zzEndRead; + if (eof) { + zzInput = YYEOF; + break zzForAction; + } + else { + zzInput = (zzBufferArrayL != null ? zzBufferArrayL[zzCurrentPosL++] : zzBufferL.charAt(zzCurrentPosL++)); + } + } + int zzNext = zzTransL[ zzRowMapL[zzState] + zzCMapL[zzInput] ]; + if (zzNext == -1) break zzForAction; + zzState = zzNext; + + int zzAttributes = zzAttrL[zzState]; + if ( (zzAttributes & 1) == 1 ) { + zzAction = zzState; + zzMarkedPosL = zzCurrentPosL; + if ( (zzAttributes & 8) == 8 ) break zzForAction; + } + + } + } + + // store back cached position + zzMarkedPos = zzMarkedPosL; + + switch (zzAction < 0 ? 
zzAction : ZZ_ACTION[zzAction]) { + case 1: + { return com.intellij.psi.TokenType.BAD_CHARACTER; + } + case 6: break; + case 4: + { return BNF_STRING; + } + case 7: break; + case 5: + { return BNF_NUMBER; + } + case 8: break; + case 3: + { return BNF_ID; + } + case 9: break; + case 2: + { return com.intellij.psi.TokenType.WHITE_SPACE; + } + case 10: break; + default: + if (zzInput == YYEOF && zzStartRead == zzCurrentPos) { + zzAtEOF = true; + return null; + } + else { + zzScanError(ZZ_NO_MATCH); + } + } + } + } + + +} diff --git a/test/test_generated.rb b/test/test_generated.rb index 164e1fc2..591f50ed 100644 --- a/test/test_generated.rb +++ b/test/test_generated.rb @@ -69,5 +69,11 @@ class TestGenerated < Minitest::Test # Specflow generated_fixture_without_loading_data("Features/BindingCulture.feature.cs") + + # JFlex + generated_sample_loading_data("Java/JFlexLexer.java") + + # GrammarKit + generated_sample_loading_data("Java/GrammarKit.java") end end From 16d9612603c2ebf670745f2160178d4d0e7cd59c Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sun, 31 Jan 2016 10:29:06 +0100 Subject: [PATCH 54/68] Support for YANG language --- lib/linguist/languages.yml | 7 +++++ samples/YANG/sfc-lisp-impl.yang | 55 +++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 samples/YANG/sfc-lisp-impl.yang diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 306dfe9d..9903f2c9 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3901,6 +3901,13 @@ YAML: - .yaml-tmlanguage ace_mode: yaml +YANG: + type: data + extensions: + - .yang + tm_scope: none + ace_mode: text + Yacc: type: programming extensions: diff --git a/samples/YANG/sfc-lisp-impl.yang b/samples/YANG/sfc-lisp-impl.yang new file mode 100644 index 00000000..761d9829 --- /dev/null +++ b/samples/YANG/sfc-lisp-impl.yang @@ -0,0 +1,55 @@ +module sfc-lisp-impl { + + yang-version 1; + namespace 
"urn:opendaylight:params:xml:ns:yang:controller:config:sfc-lisp:impl"; + prefix "sfc-lisp-impl"; + + import config { prefix config; revision-date 2013-04-05; } + import rpc-context { prefix rpcx; revision-date 2013-06-17; } + import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; } + + + description + "This module contains the base YANG definitions for + sfc-lisp implementation."; + + revision "2015-04-27" { + description + "Initial revision."; + } + + // This is the definition of the service implementation as a module identity + identity sfc-lisp-impl { + base config:module-type; + + // Specifies the prefix for generated java classes. + config:java-name-prefix SfcLisp; + } + + + // Augments the 'configuration' choice node under modules/module. + augment "/config:modules/config:module/config:configuration" { + case sfc-lisp-impl { + when "/config:modules/config:module/config:type = 'sfc-lisp-impl'"; + + //wires in the data-broker service + container data-broker { + uses config:service-ref { + refine type { + mandatory false; + config:required-identity mdsal:binding-async-data-broker; + } + } + } + + container rpc-registry { + uses config:service-ref { + refine type { + mandatory true; + config:required-identity mdsal:binding-rpc-registry; + } + } + } + } + } +} \ No newline at end of file From b235ed1223a97e6a1bdfa23043b7fa084dfd9804 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Sun, 31 Jan 2016 10:31:06 +0100 Subject: [PATCH 55/68] Grammar for YANG from Atom package --- .gitmodules | 3 +++ grammars.yml | 2 ++ lib/linguist/languages.yml | 2 +- vendor/grammars/language-yang | 1 + 4 files changed, 7 insertions(+), 1 deletion(-) create mode 160000 vendor/grammars/language-yang diff --git a/.gitmodules b/.gitmodules index e3dfa2a7..f4d07d4a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -707,3 +707,6 @@ [submodule "vendor/grammars/atom-language-stan"] path = vendor/grammars/atom-language-stan url = https://github.com/jrnold/atom-language-stan 
+[submodule "vendor/grammars/language-yang"] + path = vendor/grammars/language-yang + url = https://github.com/DzonyKalafut/language-yang.git diff --git a/grammars.yml b/grammars.yml index c8de7b72..8edfce74 100644 --- a/grammars.yml +++ b/grammars.yml @@ -372,6 +372,8 @@ vendor/grammars/language-xbase: - source.harbour vendor/grammars/language-yaml: - source.yaml +vendor/grammars/language-yang/: +- source.yang vendor/grammars/latex.tmbundle: - text.bibtex - text.log.latex diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 9903f2c9..a6813602 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3905,7 +3905,7 @@ YANG: type: data extensions: - .yang - tm_scope: none + tm_scope: source.yang ace_mode: text Yacc: diff --git a/vendor/grammars/language-yang b/vendor/grammars/language-yang new file mode 160000 index 00000000..c2d4a701 --- /dev/null +++ b/vendor/grammars/language-yang @@ -0,0 +1 @@ +Subproject commit c2d4a701d15e0ea7a1cef4f2fe4396b336504d7c From 7b58b1ea59f82b7229db3e0258cb360a949151b7 Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Tue, 2 Feb 2016 19:37:35 -0700 Subject: [PATCH 56/68] Adding Perl6-specific grammar. 
--- .gitmodules | 3 +++ grammars.yml | 4 ++++ lib/linguist/languages.yml | 2 +- vendor/grammars/perl6fe | 1 + 4 files changed, 9 insertions(+), 1 deletion(-) create mode 160000 vendor/grammars/perl6fe diff --git a/.gitmodules b/.gitmodules index f4d07d4a..468dd8b9 100644 --- a/.gitmodules +++ b/.gitmodules @@ -710,3 +710,6 @@ [submodule "vendor/grammars/language-yang"] path = vendor/grammars/language-yang url = https://github.com/DzonyKalafut/language-yang.git +[submodule "vendor/grammars/perl6fe"] + path = vendor/grammars/perl6fe + url = https://github.com/MadcapJake/language-perl6fe.git diff --git a/grammars.yml b/grammars.yml index 8edfce74..4aefd99f 100644 --- a/grammars.yml +++ b/grammars.yml @@ -445,6 +445,10 @@ vendor/grammars/pascal.tmbundle: vendor/grammars/perl.tmbundle/: - source.perl - source.perl.6 +vendor/grammars/perl6fe: +- source.meta-info +- source.perl6fe +- source.regexp.perl6fe vendor/grammars/php-smarty.tmbundle: - text.html.smarty vendor/grammars/php.tmbundle: diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 081da674..9faff3af 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -2654,7 +2654,7 @@ Perl6: - Rexfile interpreters: - perl6 - tm_scope: source.perl.6 + tm_scope: source.perl6fe ace_mode: perl Pickle: diff --git a/vendor/grammars/perl6fe b/vendor/grammars/perl6fe new file mode 160000 index 00000000..8bcf0980 --- /dev/null +++ b/vendor/grammars/perl6fe @@ -0,0 +1 @@ +Subproject commit 8bcf098074c65aac5a73612e214a3b6eb1b2d5f8 From 9fd80bfd67f8087e4b89dd016cc53ea0a420a566 Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Tue, 2 Feb 2016 22:38:50 -0700 Subject: [PATCH 57/68] Updating to latest perl6 grammar --- vendor/grammars/perl6fe | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/grammars/perl6fe b/vendor/grammars/perl6fe index 8bcf0980..f433aa1d 160000 --- a/vendor/grammars/perl6fe +++ b/vendor/grammars/perl6fe @@ -1 +1 @@ -Subproject commit 
8bcf098074c65aac5a73612e214a3b6eb1b2d5f8 +Subproject commit f433aa1dcade27f6da46c0e14779ed06d922cc20 From eb8eb28ca725e6f1f6e3b7de7c18015c630f6ef1 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Wed, 3 Feb 2016 14:19:58 +0100 Subject: [PATCH 58/68] Remove Less grammar --- .gitmodules | 3 --- grammars.yml | 2 -- lib/linguist/languages.yml | 2 +- vendor/grammars/less.tmbundle | 1 - 4 files changed, 1 insertion(+), 7 deletions(-) delete mode 160000 vendor/grammars/less.tmbundle diff --git a/.gitmodules b/.gitmodules index 468dd8b9..e67450e7 100644 --- a/.gitmodules +++ b/.gitmodules @@ -340,9 +340,6 @@ [submodule "vendor/grammars/latex.tmbundle"] path = vendor/grammars/latex.tmbundle url = https://github.com/textmate/latex.tmbundle -[submodule "vendor/grammars/less.tmbundle"] - path = vendor/grammars/less.tmbundle - url = https://github.com/textmate/less.tmbundle [submodule "vendor/grammars/lilypond.tmbundle"] path = vendor/grammars/lilypond.tmbundle url = https://github.com/textmate/lilypond.tmbundle diff --git a/grammars.yml b/grammars.yml index 4aefd99f..d7da2b7c 100644 --- a/grammars.yml +++ b/grammars.yml @@ -381,8 +381,6 @@ vendor/grammars/latex.tmbundle: - text.tex.latex - text.tex.latex.beamer - text.tex.latex.memoir -vendor/grammars/less.tmbundle: -- source.css.less vendor/grammars/lilypond.tmbundle: - source.lilypond vendor/grammars/liquid.tmbundle: diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 9faff3af..2b095e3f 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -1834,7 +1834,7 @@ Less: group: CSS extensions: - .less - tm_scope: source.css.less + tm_scope: none ace_mode: less color: "#A1D9A1" diff --git a/vendor/grammars/less.tmbundle b/vendor/grammars/less.tmbundle deleted file mode 160000 index 7ef97ad5..00000000 --- a/vendor/grammars/less.tmbundle +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7ef97ad5f15d2a136afe4d5cf568fc8ee79675b7 From 889a39534066b711480b21f0156266ed413136b4 Mon Sep 17 00:00:00 
2001 From: Paul Chaignon Date: Wed, 3 Feb 2016 14:22:23 +0100 Subject: [PATCH 59/68] Grammar for Less from Atom package --- .gitmodules | 3 +++ grammars.yml | 2 ++ lib/linguist/languages.yml | 2 +- vendor/grammars/language-less | 1 + 4 files changed, 7 insertions(+), 1 deletion(-) create mode 160000 vendor/grammars/language-less diff --git a/.gitmodules b/.gitmodules index e67450e7..41e39127 100644 --- a/.gitmodules +++ b/.gitmodules @@ -710,3 +710,6 @@ [submodule "vendor/grammars/perl6fe"] path = vendor/grammars/perl6fe url = https://github.com/MadcapJake/language-perl6fe.git +[submodule "vendor/grammars/language-less"] + path = vendor/grammars/language-less + url = https://github.com/atom/language-less.git diff --git a/grammars.yml b/grammars.yml index d7da2b7c..6139fb0a 100644 --- a/grammars.yml +++ b/grammars.yml @@ -354,6 +354,8 @@ vendor/grammars/language-javascript: vendor/grammars/language-jsoniq/: - source.jq - source.xq +vendor/grammars/language-less/: +- source.css.less vendor/grammars/language-maxscript: - source.maxscript vendor/grammars/language-ncl: diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 2b095e3f..9faff3af 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -1834,7 +1834,7 @@ Less: group: CSS extensions: - .less - tm_scope: none + tm_scope: source.css.less ace_mode: less color: "#A1D9A1" diff --git a/vendor/grammars/language-less b/vendor/grammars/language-less new file mode 160000 index 00000000..a4ded260 --- /dev/null +++ b/vendor/grammars/language-less @@ -0,0 +1 @@ +Subproject commit a4ded2608cd22b8dba79df2025ad8dcf62158ddb From 7b3efb185fed3f9f00415cd1ec9a967684bdcf07 Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Fri, 12 Feb 2016 16:25:05 -0700 Subject: [PATCH 60/68] Updating grammars --- vendor/grammars/Elm.tmLanguage | 2 +- vendor/grammars/Handlebars | 2 +- vendor/grammars/MagicPython | 2 +- vendor/grammars/NimLime | 2 +- vendor/grammars/atom-fsharp | 2 +- 
vendor/grammars/atom-language-stan | 2 +- vendor/grammars/c.tmbundle | 2 +- vendor/grammars/chapel-tmbundle | 2 +- vendor/grammars/d.tmbundle | 2 +- vendor/grammars/elixir-tmbundle | 2 +- vendor/grammars/factor | 2 +- vendor/grammars/gap-tmbundle | 2 +- vendor/grammars/html.tmbundle | 2 +- vendor/grammars/language-babel | 2 +- vendor/grammars/language-coffee-script | 2 +- vendor/grammars/language-crystal | 2 +- vendor/grammars/language-gfm | 2 +- vendor/grammars/language-javascript | 2 +- vendor/grammars/language-jsoniq | 2 +- vendor/grammars/language-renpy | 2 +- vendor/grammars/language-yaml | 2 +- vendor/grammars/latex.tmbundle | 2 +- vendor/grammars/objective-c.tmbundle | 2 +- vendor/grammars/perl6fe | 2 +- vendor/grammars/php.tmbundle | 2 +- vendor/grammars/powershell | 2 +- vendor/grammars/sublime-nginx | 2 +- vendor/grammars/sublime-pony | 2 +- vendor/grammars/sublime-rust | 2 +- vendor/grammars/sublime-typescript | 2 +- vendor/grammars/sublime_cobol | 2 +- vendor/grammars/swift.tmbundle | 2 +- vendor/grammars/vue-syntax-highlight | 2 +- 33 files changed, 33 insertions(+), 33 deletions(-) diff --git a/vendor/grammars/Elm.tmLanguage b/vendor/grammars/Elm.tmLanguage index 494145bd..437033bd 160000 --- a/vendor/grammars/Elm.tmLanguage +++ b/vendor/grammars/Elm.tmLanguage @@ -1 +1 @@ -Subproject commit 494145bded21fd6e18454d9d9d5aa8b67ced1b96 +Subproject commit 437033bd48350b49bc0dfa734206bfa0ba5de337 diff --git a/vendor/grammars/Handlebars b/vendor/grammars/Handlebars index 7e5396d9..2e9f6884 160000 --- a/vendor/grammars/Handlebars +++ b/vendor/grammars/Handlebars @@ -1 +1 @@ -Subproject commit 7e5396d9be8b05c705be614afb022f755a7edc17 +Subproject commit 2e9f68840073f5a3de13cbfed10e31b199760654 diff --git a/vendor/grammars/MagicPython b/vendor/grammars/MagicPython index d7855f98..82c76aff 160000 --- a/vendor/grammars/MagicPython +++ b/vendor/grammars/MagicPython @@ -1 +1 @@ -Subproject commit d7855f98ce6fbe0703c2753b0deae263a27fd8f8 +Subproject commit 
82c76aff704192fb9ed1f505360635f575f13b5a diff --git a/vendor/grammars/NimLime b/vendor/grammars/NimLime index 4e60e565..4db349dd 160000 --- a/vendor/grammars/NimLime +++ b/vendor/grammars/NimLime @@ -1 +1 @@ -Subproject commit 4e60e5657fcddde6bf8b4bce0030ecb3154e7dfa +Subproject commit 4db349dda5219a37e99a0375e2a5d8a001fbf20e diff --git a/vendor/grammars/atom-fsharp b/vendor/grammars/atom-fsharp index 4376748e..13eb4d43 160000 --- a/vendor/grammars/atom-fsharp +++ b/vendor/grammars/atom-fsharp @@ -1 +1 @@ -Subproject commit 4376748ed29b5e104cd0dd807c8c9f09a09ec361 +Subproject commit 13eb4d4347be4e8dd825ba174e4271f9cb0f9cab diff --git a/vendor/grammars/atom-language-stan b/vendor/grammars/atom-language-stan index f6911bef..f8d855ea 160000 --- a/vendor/grammars/atom-language-stan +++ b/vendor/grammars/atom-language-stan @@ -1 +1 @@ -Subproject commit f6911bef091e155098f113ec5aaa89bd55283705 +Subproject commit f8d855eab960b4dd140c0f469a809401544850b8 diff --git a/vendor/grammars/c.tmbundle b/vendor/grammars/c.tmbundle index f1424d49..5b5d3663 160000 --- a/vendor/grammars/c.tmbundle +++ b/vendor/grammars/c.tmbundle @@ -1 +1 @@ -Subproject commit f1424d49dd879f6c5a67d3afe6c48a17f538345a +Subproject commit 5b5d366309e8a549ce6fff5bcd9aa57ffb6dae77 diff --git a/vendor/grammars/chapel-tmbundle b/vendor/grammars/chapel-tmbundle index b657738c..469476b2 160000 --- a/vendor/grammars/chapel-tmbundle +++ b/vendor/grammars/chapel-tmbundle @@ -1 +1 @@ -Subproject commit b657738cc05cddd8d8a20afa307e93e607d17d97 +Subproject commit 469476b285adf6c4a09973fd12e97ec831afd050 diff --git a/vendor/grammars/d.tmbundle b/vendor/grammars/d.tmbundle index e6551b27..bc27d5e5 160000 --- a/vendor/grammars/d.tmbundle +++ b/vendor/grammars/d.tmbundle @@ -1 +1 @@ -Subproject commit e6551b27e30960a272758afb7da323aa402d70e6 +Subproject commit bc27d5e552b28eaceb76cfe93bd7c27a1105b57d diff --git a/vendor/grammars/elixir-tmbundle b/vendor/grammars/elixir-tmbundle index 13d94e0e..a7aa16e3 160000 --- 
a/vendor/grammars/elixir-tmbundle +++ b/vendor/grammars/elixir-tmbundle @@ -1 +1 @@ -Subproject commit 13d94e0ec98c2447adc80230708afa94012ec825 +Subproject commit a7aa16e349f673d70dd43fcab72e2a3f219cb9d7 diff --git a/vendor/grammars/factor b/vendor/grammars/factor index fffb5014..97d1ec75 160000 --- a/vendor/grammars/factor +++ b/vendor/grammars/factor @@ -1 +1 @@ -Subproject commit fffb501486083871377237b9320159de140a943a +Subproject commit 97d1ec759eb9fa2ace83c62685b6b36faec05981 diff --git a/vendor/grammars/gap-tmbundle b/vendor/grammars/gap-tmbundle index ce4ff698..52c8fafb 160000 --- a/vendor/grammars/gap-tmbundle +++ b/vendor/grammars/gap-tmbundle @@ -1 +1 @@ -Subproject commit ce4ff698ebb4a692f0b9a3ab5e30853b87fd2b34 +Subproject commit 52c8fafb664fb7909223f92403e26fe3bfde0cdc diff --git a/vendor/grammars/html.tmbundle b/vendor/grammars/html.tmbundle index 58640721..7710c79b 160000 --- a/vendor/grammars/html.tmbundle +++ b/vendor/grammars/html.tmbundle @@ -1 +1 @@ -Subproject commit 58640721ca0ea5210786086940f789c299366791 +Subproject commit 7710c79b161314fe937b05b13b1101e78f5dc62e diff --git a/vendor/grammars/language-babel b/vendor/grammars/language-babel index e63e3e29..e2fd09d7 160000 --- a/vendor/grammars/language-babel +++ b/vendor/grammars/language-babel @@ -1 +1 @@ -Subproject commit e63e3e29e93327e1d0979e3acb37a0e2acc463c0 +Subproject commit e2fd09d7d9caf1f336b3a35581e55387723dcbf3 diff --git a/vendor/grammars/language-coffee-script b/vendor/grammars/language-coffee-script index afa4f6f1..93e935ec 160000 --- a/vendor/grammars/language-coffee-script +++ b/vendor/grammars/language-coffee-script @@ -1 +1 @@ -Subproject commit afa4f6f157e02fd1a635357e8117e4c726a84542 +Subproject commit 93e935ec1d54815c182d683d3dfb9583db4e269b diff --git a/vendor/grammars/language-crystal b/vendor/grammars/language-crystal index 5c0e60fd..f87d6864 160000 --- a/vendor/grammars/language-crystal +++ b/vendor/grammars/language-crystal @@ -1 +1 @@ -Subproject commit 
5c0e60fd5323b5f093317ffda9721db3fcd72672 +Subproject commit f87d68645c9a4243a7942e6b414449f1eec8a3e7 diff --git a/vendor/grammars/language-gfm b/vendor/grammars/language-gfm index 6b60f7d8..298a8a3e 160000 --- a/vendor/grammars/language-gfm +++ b/vendor/grammars/language-gfm @@ -1 +1 @@ -Subproject commit 6b60f7d89bb7afc67640d46eb8a7cc83e266b30d +Subproject commit 298a8a3eb180f1fa6b8a8bc77c2147e355c8cafd diff --git a/vendor/grammars/language-javascript b/vendor/grammars/language-javascript index 162309ab..f68e4bfe 160000 --- a/vendor/grammars/language-javascript +++ b/vendor/grammars/language-javascript @@ -1 +1 @@ -Subproject commit 162309ab8525c0f1b8602514e6aa347c9889437e +Subproject commit f68e4bfe54a3b9d16450223f401d2fb16453897f diff --git a/vendor/grammars/language-jsoniq b/vendor/grammars/language-jsoniq index 7a971acf..008a2458 160000 --- a/vendor/grammars/language-jsoniq +++ b/vendor/grammars/language-jsoniq @@ -1 +1 @@ -Subproject commit 7a971acf1c2001e6b2e9afc7b69b6ff8a2ae39ce +Subproject commit 008a24589d32a8f8bbd55985d27844a7cda842e5 diff --git a/vendor/grammars/language-renpy b/vendor/grammars/language-renpy index 00e92d74..cc2f1c69 160000 --- a/vendor/grammars/language-renpy +++ b/vendor/grammars/language-renpy @@ -1 +1 @@ -Subproject commit 00e92d7450a97c33b40931113b64034bac27e010 +Subproject commit cc2f1c69f0b1c1d121aa5648422fc70d86dca7cf diff --git a/vendor/grammars/language-yaml b/vendor/grammars/language-yaml index e1d62e5a..249fdeed 160000 --- a/vendor/grammars/language-yaml +++ b/vendor/grammars/language-yaml @@ -1 +1 @@ -Subproject commit e1d62e5aff1c475ea3eedc3b03a52ce0e750ec89 +Subproject commit 249fdeed7877ccdcba123645f32cc6597bce4b37 diff --git a/vendor/grammars/latex.tmbundle b/vendor/grammars/latex.tmbundle index ed94d309..82986b93 160000 --- a/vendor/grammars/latex.tmbundle +++ b/vendor/grammars/latex.tmbundle @@ -1 +1 @@ -Subproject commit ed94d3091501cfc6d9aa293ea241d1c0ad6c6395 +Subproject commit 
82986b93a4f4ae7aab52445d8b7742b9af635d05 diff --git a/vendor/grammars/objective-c.tmbundle b/vendor/grammars/objective-c.tmbundle index 1bade8a1..fdcedb95 160000 --- a/vendor/grammars/objective-c.tmbundle +++ b/vendor/grammars/objective-c.tmbundle @@ -1 +1 @@ -Subproject commit 1bade8a1c919c358fc4a6d83ba93e98e419ffede +Subproject commit fdcedb95de8846220c49f769fee91045188767d9 diff --git a/vendor/grammars/perl6fe b/vendor/grammars/perl6fe index f433aa1d..12fa46f7 160000 --- a/vendor/grammars/perl6fe +++ b/vendor/grammars/perl6fe @@ -1 +1 @@ -Subproject commit f433aa1dcade27f6da46c0e14779ed06d922cc20 +Subproject commit 12fa46f7bbf646616df1120ed8cfee3e1fcb75de diff --git a/vendor/grammars/php.tmbundle b/vendor/grammars/php.tmbundle index 2ecaa60d..3ed4837b 160000 --- a/vendor/grammars/php.tmbundle +++ b/vendor/grammars/php.tmbundle @@ -1 +1 @@ -Subproject commit 2ecaa60d92b92d4c07f243207ba1d5b2114bb70a +Subproject commit 3ed4837b43d3f650ebb525b068636281942883a0 diff --git a/vendor/grammars/powershell b/vendor/grammars/powershell index 982ae21d..b4ea52c5 160000 --- a/vendor/grammars/powershell +++ b/vendor/grammars/powershell @@ -1 +1 @@ -Subproject commit 982ae21d54b3affc498131515ebbfca6b186ac16 +Subproject commit b4ea52c51c5111fdda1d24103082d9580472c31b diff --git a/vendor/grammars/sublime-nginx b/vendor/grammars/sublime-nginx index ba28d29d..fcf644ec 160000 --- a/vendor/grammars/sublime-nginx +++ b/vendor/grammars/sublime-nginx @@ -1 +1 @@ -Subproject commit ba28d29de729f01d39a29fa214a6818112b48803 +Subproject commit fcf644ecea021ab8a6bc171f415f8df0b005b31e diff --git a/vendor/grammars/sublime-pony b/vendor/grammars/sublime-pony index 384ba3ed..b5eb8f8e 160000 --- a/vendor/grammars/sublime-pony +++ b/vendor/grammars/sublime-pony @@ -1 +1 @@ -Subproject commit 384ba3ed980189f0cc140d3fb86455f880fffeb0 +Subproject commit b5eb8f8e97e5253de8d81cfa4826cfb5815f2944 diff --git a/vendor/grammars/sublime-rust b/vendor/grammars/sublime-rust index 06a278cc..621e4f61 160000 --- 
a/vendor/grammars/sublime-rust +++ b/vendor/grammars/sublime-rust @@ -1 +1 @@ -Subproject commit 06a278ccfaf5e542d26a95d66c734b4407bc4912 +Subproject commit 621e4f6117531d8fe299eb5584a6be766df1822e diff --git a/vendor/grammars/sublime-typescript b/vendor/grammars/sublime-typescript index 0504a558..26fd717a 160000 --- a/vendor/grammars/sublime-typescript +++ b/vendor/grammars/sublime-typescript @@ -1 +1 @@ -Subproject commit 0504a5582b6e5d814630259c87f2d8f8e6027c47 +Subproject commit 26fd717a79d1984e76bbe6d958c5c4bbf0179049 diff --git a/vendor/grammars/sublime_cobol b/vendor/grammars/sublime_cobol index ad15734a..3d2b6dbc 160000 --- a/vendor/grammars/sublime_cobol +++ b/vendor/grammars/sublime_cobol @@ -1 +1 @@ -Subproject commit ad15734a4a9798a006525f53a968565fee1411b1 +Subproject commit 3d2b6dbcd1b27023150ff9d8ab47953706d070b8 diff --git a/vendor/grammars/swift.tmbundle b/vendor/grammars/swift.tmbundle index f5ce5833..32141e91 160000 --- a/vendor/grammars/swift.tmbundle +++ b/vendor/grammars/swift.tmbundle @@ -1 +1 @@ -Subproject commit f5ce5833a914f71aa1ab7df0a262104acd3436ef +Subproject commit 32141e91eeffbbd29482d5b1a0131eec4613e73e diff --git a/vendor/grammars/vue-syntax-highlight b/vendor/grammars/vue-syntax-highlight index a2336ddc..f20c9bab 160000 --- a/vendor/grammars/vue-syntax-highlight +++ b/vendor/grammars/vue-syntax-highlight @@ -1 +1 @@ -Subproject commit a2336ddc7ea01876caa31793f806250cad3b53e6 +Subproject commit f20c9bab7e71738f421e6edc1aab8839ee05d85a From bd95ac0beb4acfb68a505f0a1a5a40b6f00585ad Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Fri, 12 Feb 2016 16:27:29 -0700 Subject: [PATCH 61/68] Bumping version --- lib/linguist/version.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/linguist/version.rb b/lib/linguist/version.rb index ba44bb5b..bc1707a9 100644 --- a/lib/linguist/version.rb +++ b/lib/linguist/version.rb @@ -1,3 +1,3 @@ module Linguist - VERSION = "4.7.4" + VERSION = "4.7.5" end From 
8de50edb41952b70f44f8199a4995e669c3e5a80 Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Fri, 12 Feb 2016 17:05:44 -0700 Subject: [PATCH 62/68] ruby for example --- lib/linguist/strategy/modeline.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/linguist/strategy/modeline.rb b/lib/linguist/strategy/modeline.rb index ece98a1f..eb5a8a5f 100644 --- a/lib/linguist/strategy/modeline.rb +++ b/lib/linguist/strategy/modeline.rb @@ -5,12 +5,12 @@ module Linguist # First form vim modeline # [text]{white}{vi:|vim:|ex:}[white]{options} - # ex: 'vim: syntax=perl' + # ex: 'vim: syntax=ruby' VIM_MODELINE_1 = /(?:vim|vi|ex):\s*(?:ft|filetype|syntax)=(\w+)\s?/i # Second form vim modeline (compatible with some versions of Vi) # [text]{white}{vi:|vim:|Vim:|ex:}[white]se[t] {options}:[text] - # ex: 'vim set syntax=perl:' + # ex: 'vim set syntax=ruby:' VIM_MODELINE_2 = /(?:vim|vi|Vim|ex):\s*se(?:t)?.*\s(?:ft|filetype|syntax)=(\w+)\s?.*:/i MODELINES = [EMACS_MODELINE, VIM_MODELINE_1, VIM_MODELINE_2] From fb9f271720c13f109e34cd818cf8a26b23d821b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Skytt=C3=A4?= Date: Sun, 14 Feb 2016 21:57:02 +0200 Subject: [PATCH 63/68] Shell: Add shell-script alias for recognizing Emacs modelines --- lib/linguist/languages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 14519a61..1f04c03c 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3274,6 +3274,7 @@ Shell: color: "#89e051" aliases: - sh + - shell-script - bash - zsh extensions: From 17a946358831a776fcacbe8be7dbed29c2dbac10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Skytt=C3=A4?= Date: Sun, 14 Feb 2016 22:08:21 +0200 Subject: [PATCH 64/68] Makefile: Add Makefile.am and Makefile.in filenames --- lib/linguist/languages.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 14519a61..e1c0f970 100644 --- 
a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -2031,6 +2031,8 @@ Makefile: - Makefile - Makefile.inc - makefile + - Makefile.am + - Makefile.in interpreters: - make ace_mode: makefile From 5c4129f85bb2d5af921cd826f4d6e75072bc1fe9 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Fri, 19 Feb 2016 08:50:01 +0100 Subject: [PATCH 65/68] Move HACKING.rst.txt to sample directory The file was incorrectly placed at the root of the repo --- HACKING.rst.txt => samples/reStructuredText/HACKING.rst.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename HACKING.rst.txt => samples/reStructuredText/HACKING.rst.txt (100%) diff --git a/HACKING.rst.txt b/samples/reStructuredText/HACKING.rst.txt similarity index 100% rename from HACKING.rst.txt rename to samples/reStructuredText/HACKING.rst.txt From b08c5a8421bbc69d503d82d74cc905b31f078ff8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Skytt=C3=A4?= Date: Sun, 14 Feb 2016 22:05:53 +0200 Subject: [PATCH 66/68] Shell: Add .sh.in extension --- lib/linguist/languages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 1f04c03c..2e2f5fce 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -3285,6 +3285,7 @@ Shell: - .command - .fcgi - .ksh + - .sh.in - .tmux - .tool - .zsh From 1c20c541917da5a1aa2650381810f65a2cfafbcb Mon Sep 17 00:00:00 2001 From: Arfon Smith Date: Fri, 19 Feb 2016 16:55:38 -0700 Subject: [PATCH 67/68] Fixing ordering of makefile filenames --- lib/linguist/languages.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 3e015b2b..4d61ca61 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -2029,10 +2029,10 @@ Makefile: - GNUmakefile - Kbuild - Makefile - - Makefile.inc - - makefile - Makefile.am - Makefile.in + - Makefile.inc + - makefile interpreters: - make ace_mode: makefile From 
864a6c0a205aba086955eb5a2430e60d4a42b089 Mon Sep 17 00:00:00 2001 From: Michael Zhou Date: Fri, 19 Feb 2016 18:23:15 -0500 Subject: [PATCH 68/68] Add .bzl extension to Python in languages.yml .bzl is the extension for Skylark, a subset of Python that is used to define build extensions for the Bazel build system. Eg: https://github.com/bazelbuild/bazel/blob/master/tools/build_rules/closure/closure_js_binary.bzl --- lib/linguist/languages.yml | 1 + samples/Python/closure_js_binary.bzl | 124 +++++++++++++++++++++++++++ 2 files changed, 125 insertions(+) create mode 100644 samples/Python/closure_js_binary.bzl diff --git a/lib/linguist/languages.yml b/lib/linguist/languages.yml index 4d61ca61..b497d88b 100644 --- a/lib/linguist/languages.yml +++ b/lib/linguist/languages.yml @@ -2822,6 +2822,7 @@ Python: color: "#3572A5" extensions: - .py + - .bzl - .cgi - .fcgi - .gyp diff --git a/samples/Python/closure_js_binary.bzl b/samples/Python/closure_js_binary.bzl new file mode 100644 index 00000000..c4453ffb --- /dev/null +++ b/samples/Python/closure_js_binary.bzl @@ -0,0 +1,124 @@ +# Copyright 2015 The Bazel Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Build definitions for JavaScript binaries compiled with the Closure Compiler. + +A single file is produced with the _compiled.js suffix. + +By default, the name of the entry point is assumed to be the same as that of the +build target. This behaviour may be overridden with the "main" attribute. 
+ +The optimization level may be set with the "compilation_level" attribute. +Supported values are: unobfuscated, simple, and advanced. + +Example: + + closure_js_binary( + name = "hello", + compilation_level = "simple", + language_in = "ecmascript6", + language_out = "ecmascript3", + externs = ["//third_party/javascript/google_cast/cast.js"], + deps = [ + "@closure_library//:closure_library", + ":hello_lib", + ], + ) + +This rule will produce hello_combined.js. +""" + +_COMPILATION_LEVELS = { + "whitespace_only": [ + "--compilation_level=WHITESPACE_ONLY", + "--formatting=PRETTY_PRINT" + ], + "simple": ["--compilation_level=SIMPLE"], + "advanced": ["--compilation_level=ADVANCED"] +} + +_SUPPORTED_LANGUAGES = { + "es3": ["ES3"], + "ecmascript3": ["ECMASCRIPT3"], + "es5": ["ES5"], + "ecmascript5": ["ECMASCRIPT5"], + "es5_strict": ["ES5_STRICT"], + "ecmascript5_strict": ["ECMASCRIPT5_STRICT"], + "es6": ["ES6"], + "ecmascript6": ["ECMASCRIPT6"], + "es6_strict": ["ES6_STRICT"], + "ecmascript6_strict": ["ECMASCRIPT6_STRICT"], + "es6_typed": ["ES6_TYPED"], + "ecmascript6_typed": ["ECMASCRIPT6_TYPED"], +} + +def _impl(ctx): + externs = set(order="compile") + srcs = set(order="compile") + for dep in ctx.attr.deps: + externs += dep.transitive_js_externs + srcs += dep.transitive_js_srcs + + args = [ + "--entry_point=goog:%s" % ctx.attr.main, + "--js_output_file=%s" % ctx.outputs.out.path, + "--dependency_mode=LOOSE", + "--warning_level=VERBOSE", + ] + (["--js=%s" % src.path for src in srcs] + + ["--externs=%s" % extern.path for extern in externs]) + + # Set the compilation level. + if ctx.attr.compilation_level in _COMPILATION_LEVELS: + args += _COMPILATION_LEVELS[ctx.attr.compilation_level] + else: + fail("Invalid compilation_level '%s', expected one of %s" % + (ctx.attr.compilation_level, _COMPILATION_LEVELS.keys())) + + # Set the language in. 
+ if ctx.attr.language_in in _SUPPORTED_LANGUAGES: + args += "--language_in=" + _SUPPORTED_LANGUAGES[ctx.attr.language_in] + else: + fail("Invalid language_in '%s', expected one of %s" % + (ctx.attr.language_in, _SUPPORTED_LANGUAGES.keys())) + + # Set the language out. + if ctx.attr.language_out in _SUPPORTED_LANGUAGES: + args += "--language_out=" + _SUPPORTED_LANGUAGES[ctx.attr.language_out] + else: + fail("Invalid language_out '%s', expected one of %s" % + (ctx.attr.language_out, _SUPPORTED_LANGUAGES.keys())) + + ctx.action( + inputs=list(srcs) + list(externs), + outputs=[ctx.outputs.out], + arguments=args, + executable=ctx.executable._closure_compiler) + + return struct(files=set([ctx.outputs.out])) + +closure_js_binary = rule( + implementation=_impl, + attrs={ + "deps": attr.label_list( + allow_files=False, + providers=["transitive_js_externs", "transitive_js_srcs"]), + "main": attr.string(default="%{name}"), + "compilation_level": attr.string(default="advanced"), + "language_in": attr.string(default="ecmascript6"), + "language_out": attr.string(default="ecmascript3"), + "_closure_compiler": attr.label( + default=Label("//external:closure_compiler_"), + executable=True), + }, + outputs={"out": "%{name}_combined.js"})