mirror of https://github.com/KevinMidboe/linguist.git, synced 2025-10-29 17:50:22 +00:00

Compare commits: revert-384 ... kivikakk/g (22 commits)
	
Commits in this comparison:

- adfb438a42
- 0ed1b821d7
- 1a769c4665
- e7e64bf39a
- e4b9430024
- a76805e40d
- 8d27845f8c
- 9a8ab45b6f
- e335d48625
- 4f46155c05
- 38901d51d2
- ded0dc74e0
- c5d1bb5370
- c8ca48856b
- 7be6fb0138
- 8c516655bc
- 9dceffce2f
- 33be70eb28
- 9c4dc3047c
- d8e5f3c965
- 71bf640a47
- c9b3d19c6f
							
								
								
									
.gitignore (3 changes, vendored)
							| @@ -8,3 +8,6 @@ lib/linguist/samples.json | ||||
| /node_modules | ||||
| test/fixtures/ace_modes.json | ||||
| /vendor/gems/ | ||||
| /tmp | ||||
| *.bundle | ||||
| *.so | ||||
|   | ||||
							
								
								
									
.gitmodules (18 changes, vendored)
							| @@ -439,9 +439,6 @@ | ||||
| [submodule "vendor/grammars/sublime-golo"] | ||||
| 	path = vendor/grammars/sublime-golo | ||||
| 	url = https://github.com/TypeUnsafe/sublime-golo | ||||
| [submodule "vendor/grammars/JSyntax"] | ||||
| 	path = vendor/grammars/JSyntax | ||||
| 	url = https://github.com/bcj/JSyntax | ||||
| [submodule "vendor/grammars/TXL"] | ||||
| 	path = vendor/grammars/TXL | ||||
| 	url = https://github.com/MikeHoffert/Sublime-Text-TXL-syntax | ||||
| @@ -868,6 +865,9 @@ | ||||
| [submodule "vendor/grammars/language-ballerina"] | ||||
| 	path = vendor/grammars/language-ballerina | ||||
| 	url = https://github.com/ballerinalang/plugin-vscode | ||||
| [submodule "vendor/grammars/language-yara"] | ||||
| 	path = vendor/grammars/language-yara | ||||
| 	url = https://github.com/blacktop/language-yara | ||||
| [submodule "vendor/grammars/language-ruby"] | ||||
| 	path = vendor/grammars/language-ruby | ||||
| 	url = https://github.com/atom/language-ruby | ||||
| @@ -883,3 +883,15 @@ | ||||
| [submodule "vendor/grammars/atom-language-julia"] | ||||
| 	path = vendor/grammars/atom-language-julia | ||||
| 	url = https://github.com/JuliaEditorSupport/atom-language-julia | ||||
| [submodule "vendor/grammars/language-cwl"] | ||||
| 	path = vendor/grammars/language-cwl | ||||
| 	url = https://github.com/manabuishii/language-cwl | ||||
| [submodule "vendor/grammars/Syntax-highlighting-for-PostCSS"] | ||||
| 	path = vendor/grammars/Syntax-highlighting-for-PostCSS | ||||
| 	url = https://github.com/hudochenkov/Syntax-highlighting-for-PostCSS | ||||
| [submodule "vendor/grammars/javadoc.tmbundle"] | ||||
| 	path = vendor/grammars/javadoc.tmbundle | ||||
| 	url = https://github.com/textmate/javadoc.tmbundle | ||||
| [submodule "vendor/grammars/JSyntax"] | ||||
| 	path = vendor/grammars/JSyntax | ||||
| 	url = https://github.com/tikkanz/JSyntax | ||||
|   | ||||
| @@ -93,6 +93,7 @@ Linguist is maintained with :heart: by: | ||||
| - **@BenEddy** (GitHub staff) | ||||
| - **@Caged** (GitHub staff) | ||||
| - **@grantr** (GitHub staff) | ||||
| - **@kivikakk** (GitHub staff) | ||||
| - **@larsbrinkhoff** | ||||
| - **@lildude** (GitHub staff) | ||||
| - **@pchaigno** | ||||
|   | ||||
							
								
								
									
Rakefile (20 changes)
							| @@ -1,6 +1,7 @@ | ||||
| require 'bundler/setup' | ||||
| require 'rake/clean' | ||||
| require 'rake/testtask' | ||||
| require 'rake/extensiontask' | ||||
| require 'yaml' | ||||
| require 'yajl' | ||||
| require 'open-uri' | ||||
| @@ -10,8 +11,14 @@ task :default => :test | ||||
|  | ||||
| Rake::TestTask.new | ||||
|  | ||||
| gem_spec = Gem::Specification.load('github-linguist.gemspec') | ||||
|  | ||||
| Rake::ExtensionTask.new('linguist', gem_spec) do |ext| | ||||
|   ext.lib_dir = File.join('lib', 'linguist') | ||||
| end | ||||
|  | ||||
| # Extend test task to check for samples and fetch latest Ace modes | ||||
| task :test => [:check_samples, :fetch_ace_modes] | ||||
| task :test => [:compile, :check_samples, :fetch_ace_modes] | ||||
|  | ||||
| desc "Check that we have samples.json generated" | ||||
| task :check_samples do | ||||
| @@ -34,15 +41,22 @@ task :fetch_ace_modes do | ||||
|   end | ||||
| end | ||||
|  | ||||
| task :samples do | ||||
| task :samples => :compile do | ||||
|   require 'linguist/samples' | ||||
|   json = Yajl.dump(Linguist::Samples.data, :pretty => true) | ||||
|   File.write 'lib/linguist/samples.json', json | ||||
| end | ||||
|  | ||||
| task :flex do | ||||
|   if `flex -V` !~ /^flex \d+\.\d+\.\d+/ | ||||
|     fail "flex not detected" | ||||
|   end | ||||
|   system "cd ext/linguist && flex tokenizer.l" | ||||
| end | ||||
|  | ||||
| task :build_gem => :samples do | ||||
|   rm_rf "grammars" | ||||
|   sh "script/convert-grammars" | ||||
|   sh "script/grammar-compiler compile -o grammars || true" | ||||
|   languages = YAML.load_file("lib/linguist/languages.yml") | ||||
|   File.write("lib/linguist/languages.json", Yajl.dump(languages)) | ||||
|   `gem build github-linguist.gemspec` | ||||
|   | ||||
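The new `:compile` task (provided by rake-compiler's `ExtensionTask`) is now a prerequisite of both `:test` and `:samples`. Below is a minimal sketch of driving that task graph programmatically instead of from the `rake` CLI; it assumes it is run from the repository root with the gem's dependencies installed.

```ruby
require 'rake'

# Programmatic equivalent of running `rake samples` in the repository root:
# :samples depends on :compile, so rake-compiler builds ext/linguist into
# lib/linguist first, and only then is lib/linguist/samples.json regenerated.
app = Rake.application
app.init
app.load_rakefile
Rake::Task['samples'].invoke
```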
| @@ -117,9 +117,8 @@ def git_linguist(args) | ||||
|   end | ||||
|  | ||||
|   parser.parse!(args) | ||||
|  | ||||
|   git_dir = `git rev-parse --git-dir`.strip | ||||
|   raise "git-linguist must be run in a Git repository (#{Dir.pwd})" unless $?.success? | ||||
|   raise "git-linguist must be run in a Git repository" unless $?.success? | ||||
|   wrapper = GitLinguist.new(git_dir, commit, incremental) | ||||
|  | ||||
|   case args.pop | ||||
| @@ -141,6 +140,10 @@ def git_linguist(args) | ||||
|     $stderr.print(parser.help) | ||||
|     exit 1 | ||||
|   end | ||||
| rescue Exception => e | ||||
|   $stderr.puts e.message | ||||
|   $stderr.puts e.backtrace | ||||
|   exit 1 | ||||
| end | ||||
|  | ||||
| git_linguist(ARGV) | ||||
|   | ||||
							
								
								
									
ext/linguist/extconf.rb (3 lines, new file)
							| @@ -0,0 +1,3 @@ | ||||
| require 'mkmf' | ||||
| dir_config('linguist') | ||||
| create_makefile('linguist/linguist') | ||||
							
								
								
									
ext/linguist/lex.linguist_yy.c (2226 lines, new file; diff suppressed because it is too large)
							
								
								
									
ext/linguist/lex.linguist_yy.h (336 lines, new file)
							| @@ -0,0 +1,336 @@ | ||||
| #ifndef linguist_yyHEADER_H | ||||
| #define linguist_yyHEADER_H 1 | ||||
| #define linguist_yyIN_HEADER 1 | ||||
|  | ||||
| #line 6 "lex.linguist_yy.h" | ||||
|  | ||||
| #define  YY_INT_ALIGNED short int | ||||
|  | ||||
| /* A lexical scanner generated by flex */ | ||||
|  | ||||
| #define FLEX_SCANNER | ||||
| #define YY_FLEX_MAJOR_VERSION 2 | ||||
| #define YY_FLEX_MINOR_VERSION 5 | ||||
| #define YY_FLEX_SUBMINOR_VERSION 35 | ||||
| #if YY_FLEX_SUBMINOR_VERSION > 0 | ||||
| #define FLEX_BETA | ||||
| #endif | ||||
|  | ||||
| /* First, we deal with  platform-specific or compiler-specific issues. */ | ||||
|  | ||||
| /* begin standard C headers. */ | ||||
| #include <stdio.h> | ||||
| #include <string.h> | ||||
| #include <errno.h> | ||||
| #include <stdlib.h> | ||||
|  | ||||
| /* end standard C headers. */ | ||||
|  | ||||
| /* flex integer type definitions */ | ||||
|  | ||||
| #ifndef FLEXINT_H | ||||
| #define FLEXINT_H | ||||
|  | ||||
| /* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */ | ||||
|  | ||||
| #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L | ||||
|  | ||||
| /* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, | ||||
|  * if you want the limit (max/min) macros for int types.  | ||||
|  */ | ||||
| #ifndef __STDC_LIMIT_MACROS | ||||
| #define __STDC_LIMIT_MACROS 1 | ||||
| #endif | ||||
|  | ||||
| #include <inttypes.h> | ||||
| typedef int8_t flex_int8_t; | ||||
| typedef uint8_t flex_uint8_t; | ||||
| typedef int16_t flex_int16_t; | ||||
| typedef uint16_t flex_uint16_t; | ||||
| typedef int32_t flex_int32_t; | ||||
| typedef uint32_t flex_uint32_t; | ||||
| typedef uint64_t flex_uint64_t; | ||||
| #else | ||||
| typedef signed char flex_int8_t; | ||||
| typedef short int flex_int16_t; | ||||
| typedef int flex_int32_t; | ||||
| typedef unsigned char flex_uint8_t;  | ||||
| typedef unsigned short int flex_uint16_t; | ||||
| typedef unsigned int flex_uint32_t; | ||||
| #endif /* ! C99 */ | ||||
|  | ||||
| /* Limits of integral types. */ | ||||
| #ifndef INT8_MIN | ||||
| #define INT8_MIN               (-128) | ||||
| #endif | ||||
| #ifndef INT16_MIN | ||||
| #define INT16_MIN              (-32767-1) | ||||
| #endif | ||||
| #ifndef INT32_MIN | ||||
| #define INT32_MIN              (-2147483647-1) | ||||
| #endif | ||||
| #ifndef INT8_MAX | ||||
| #define INT8_MAX               (127) | ||||
| #endif | ||||
| #ifndef INT16_MAX | ||||
| #define INT16_MAX              (32767) | ||||
| #endif | ||||
| #ifndef INT32_MAX | ||||
| #define INT32_MAX              (2147483647) | ||||
| #endif | ||||
| #ifndef UINT8_MAX | ||||
| #define UINT8_MAX              (255U) | ||||
| #endif | ||||
| #ifndef UINT16_MAX | ||||
| #define UINT16_MAX             (65535U) | ||||
| #endif | ||||
| #ifndef UINT32_MAX | ||||
| #define UINT32_MAX             (4294967295U) | ||||
| #endif | ||||
|  | ||||
| #endif /* ! FLEXINT_H */ | ||||
|  | ||||
| #ifdef __cplusplus | ||||
|  | ||||
| /* The "const" storage-class-modifier is valid. */ | ||||
| #define YY_USE_CONST | ||||
|  | ||||
| #else	/* ! __cplusplus */ | ||||
|  | ||||
| /* C99 requires __STDC__ to be defined as 1. */ | ||||
| #if defined (__STDC__) | ||||
|  | ||||
| #define YY_USE_CONST | ||||
|  | ||||
| #endif	/* defined (__STDC__) */ | ||||
| #endif	/* ! __cplusplus */ | ||||
|  | ||||
| #ifdef YY_USE_CONST | ||||
| #define yyconst const | ||||
| #else | ||||
| #define yyconst | ||||
| #endif | ||||
|  | ||||
| /* An opaque pointer. */ | ||||
| #ifndef YY_TYPEDEF_YY_SCANNER_T | ||||
| #define YY_TYPEDEF_YY_SCANNER_T | ||||
| typedef void* yyscan_t; | ||||
| #endif | ||||
|  | ||||
| /* For convenience, these vars (plus the bison vars far below) | ||||
|    are macros in the reentrant scanner. */ | ||||
| #define yyin yyg->yyin_r | ||||
| #define yyout yyg->yyout_r | ||||
| #define yyextra yyg->yyextra_r | ||||
| #define yyleng yyg->yyleng_r | ||||
| #define yytext yyg->yytext_r | ||||
| #define yylineno (YY_CURRENT_BUFFER_LVALUE->yy_bs_lineno) | ||||
| #define yycolumn (YY_CURRENT_BUFFER_LVALUE->yy_bs_column) | ||||
| #define yy_flex_debug yyg->yy_flex_debug_r | ||||
|  | ||||
| /* Size of default input buffer. */ | ||||
| #ifndef YY_BUF_SIZE | ||||
| #define YY_BUF_SIZE 16384 | ||||
| #endif | ||||
|  | ||||
| #ifndef YY_TYPEDEF_YY_BUFFER_STATE | ||||
| #define YY_TYPEDEF_YY_BUFFER_STATE | ||||
| typedef struct yy_buffer_state *YY_BUFFER_STATE; | ||||
| #endif | ||||
|  | ||||
| #ifndef YY_TYPEDEF_YY_SIZE_T | ||||
| #define YY_TYPEDEF_YY_SIZE_T | ||||
| typedef size_t yy_size_t; | ||||
| #endif | ||||
|  | ||||
| #ifndef YY_STRUCT_YY_BUFFER_STATE | ||||
| #define YY_STRUCT_YY_BUFFER_STATE | ||||
| struct yy_buffer_state | ||||
| 	{ | ||||
| 	FILE *yy_input_file; | ||||
|  | ||||
| 	char *yy_ch_buf;		/* input buffer */ | ||||
| 	char *yy_buf_pos;		/* current position in input buffer */ | ||||
|  | ||||
| 	/* Size of input buffer in bytes, not including room for EOB | ||||
| 	 * characters. | ||||
| 	 */ | ||||
| 	yy_size_t yy_buf_size; | ||||
|  | ||||
| 	/* Number of characters read into yy_ch_buf, not including EOB | ||||
| 	 * characters. | ||||
| 	 */ | ||||
| 	yy_size_t yy_n_chars; | ||||
|  | ||||
| 	/* Whether we "own" the buffer - i.e., we know we created it, | ||||
| 	 * and can realloc() it to grow it, and should free() it to | ||||
| 	 * delete it. | ||||
| 	 */ | ||||
| 	int yy_is_our_buffer; | ||||
|  | ||||
| 	/* Whether this is an "interactive" input source; if so, and | ||||
| 	 * if we're using stdio for input, then we want to use getc() | ||||
| 	 * instead of fread(), to make sure we stop fetching input after | ||||
| 	 * each newline. | ||||
| 	 */ | ||||
| 	int yy_is_interactive; | ||||
|  | ||||
| 	/* Whether we're considered to be at the beginning of a line. | ||||
| 	 * If so, '^' rules will be active on the next match, otherwise | ||||
| 	 * not. | ||||
| 	 */ | ||||
| 	int yy_at_bol; | ||||
|  | ||||
|     int yy_bs_lineno; /**< The line count. */ | ||||
|     int yy_bs_column; /**< The column count. */ | ||||
|      | ||||
| 	/* Whether to try to fill the input buffer when we reach the | ||||
| 	 * end of it. | ||||
| 	 */ | ||||
| 	int yy_fill_buffer; | ||||
|  | ||||
| 	int yy_buffer_status; | ||||
|  | ||||
| 	}; | ||||
| #endif /* !YY_STRUCT_YY_BUFFER_STATE */ | ||||
|  | ||||
| void linguist_yyrestart (FILE *input_file ,yyscan_t yyscanner ); | ||||
| void linguist_yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); | ||||
| YY_BUFFER_STATE linguist_yy_create_buffer (FILE *file,int size ,yyscan_t yyscanner ); | ||||
| void linguist_yy_delete_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); | ||||
| void linguist_yy_flush_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); | ||||
| void linguist_yypush_buffer_state (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); | ||||
| void linguist_yypop_buffer_state (yyscan_t yyscanner ); | ||||
|  | ||||
| YY_BUFFER_STATE linguist_yy_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner ); | ||||
| YY_BUFFER_STATE linguist_yy_scan_string (yyconst char *yy_str ,yyscan_t yyscanner ); | ||||
| YY_BUFFER_STATE linguist_yy_scan_bytes (yyconst char *bytes,yy_size_t len ,yyscan_t yyscanner ); | ||||
|  | ||||
| void *linguist_yyalloc (yy_size_t ,yyscan_t yyscanner ); | ||||
| void *linguist_yyrealloc (void *,yy_size_t ,yyscan_t yyscanner ); | ||||
| void linguist_yyfree (void * ,yyscan_t yyscanner ); | ||||
|  | ||||
| /* Begin user sect3 */ | ||||
|  | ||||
| #define yytext_ptr yytext_r | ||||
|  | ||||
| #ifdef YY_HEADER_EXPORT_START_CONDITIONS | ||||
| #define INITIAL 0 | ||||
| #define sgml 1 | ||||
| #define c_comment 2 | ||||
| #define xml_comment 3 | ||||
| #define haskell_comment 4 | ||||
| #define ocaml_comment 5 | ||||
| #define python_dcomment 6 | ||||
| #define python_scomment 7 | ||||
|  | ||||
| #endif | ||||
|  | ||||
| #ifndef YY_NO_UNISTD_H | ||||
| /* Special case for "unistd.h", since it is non-ANSI. We include it way | ||||
|  * down here because we want the user's section 1 to have been scanned first. | ||||
|  * The user has a chance to override it with an option. | ||||
|  */ | ||||
| #include <unistd.h> | ||||
| #endif | ||||
|  | ||||
| #define YY_EXTRA_TYPE struct tokenizer_extra * | ||||
|  | ||||
| int linguist_yylex_init (yyscan_t* scanner); | ||||
|  | ||||
| int linguist_yylex_init_extra (YY_EXTRA_TYPE user_defined,yyscan_t* scanner); | ||||
|  | ||||
| /* Accessor methods to globals. | ||||
|    These are made visible to non-reentrant scanners for convenience. */ | ||||
|  | ||||
| int linguist_yylex_destroy (yyscan_t yyscanner ); | ||||
|  | ||||
| int linguist_yyget_debug (yyscan_t yyscanner ); | ||||
|  | ||||
| void linguist_yyset_debug (int debug_flag ,yyscan_t yyscanner ); | ||||
|  | ||||
| YY_EXTRA_TYPE linguist_yyget_extra (yyscan_t yyscanner ); | ||||
|  | ||||
| void linguist_yyset_extra (YY_EXTRA_TYPE user_defined ,yyscan_t yyscanner ); | ||||
|  | ||||
| FILE *linguist_yyget_in (yyscan_t yyscanner ); | ||||
|  | ||||
| void linguist_yyset_in  (FILE * in_str ,yyscan_t yyscanner ); | ||||
|  | ||||
| FILE *linguist_yyget_out (yyscan_t yyscanner ); | ||||
|  | ||||
| void linguist_yyset_out  (FILE * out_str ,yyscan_t yyscanner ); | ||||
|  | ||||
| yy_size_t linguist_yyget_leng (yyscan_t yyscanner ); | ||||
|  | ||||
| char *linguist_yyget_text (yyscan_t yyscanner ); | ||||
|  | ||||
| int linguist_yyget_lineno (yyscan_t yyscanner ); | ||||
|  | ||||
| void linguist_yyset_lineno (int line_number ,yyscan_t yyscanner ); | ||||
|  | ||||
| /* Macros after this point can all be overridden by user definitions in | ||||
|  * section 1. | ||||
|  */ | ||||
|  | ||||
| #ifndef YY_SKIP_YYWRAP | ||||
| #ifdef __cplusplus | ||||
| extern "C" int linguist_yywrap (yyscan_t yyscanner ); | ||||
| #else | ||||
| extern int linguist_yywrap (yyscan_t yyscanner ); | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef yytext_ptr | ||||
| static void yy_flex_strncpy (char *,yyconst char *,int ,yyscan_t yyscanner); | ||||
| #endif | ||||
|  | ||||
| #ifdef YY_NEED_STRLEN | ||||
| static int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner); | ||||
| #endif | ||||
|  | ||||
| #ifndef YY_NO_INPUT | ||||
|  | ||||
| #endif | ||||
|  | ||||
| /* Amount of stuff to slurp up with each read. */ | ||||
| #ifndef YY_READ_BUF_SIZE | ||||
| #define YY_READ_BUF_SIZE 8192 | ||||
| #endif | ||||
|  | ||||
| /* Number of entries by which start-condition stack grows. */ | ||||
| #ifndef YY_START_STACK_INCR | ||||
| #define YY_START_STACK_INCR 25 | ||||
| #endif | ||||
|  | ||||
| /* Default declaration of generated scanner - a define so the user can | ||||
|  * easily add parameters. | ||||
|  */ | ||||
| #ifndef YY_DECL | ||||
| #define YY_DECL_IS_OURS 1 | ||||
|  | ||||
| extern int linguist_yylex (yyscan_t yyscanner); | ||||
|  | ||||
| #define YY_DECL int linguist_yylex (yyscan_t yyscanner) | ||||
| #endif /* !YY_DECL */ | ||||
|  | ||||
| /* yy_get_previous_state - get the state just before the EOB char was reached */ | ||||
|  | ||||
| #undef YY_NEW_FILE | ||||
| #undef YY_FLUSH_BUFFER | ||||
| #undef yy_set_bol | ||||
| #undef yy_new_buffer | ||||
| #undef yy_set_interactive | ||||
| #undef YY_DO_BEFORE_ACTION | ||||
|  | ||||
| #ifdef YY_DECL_IS_OURS | ||||
| #undef YY_DECL_IS_OURS | ||||
| #undef YY_DECL | ||||
| #endif | ||||
|  | ||||
| #line 118 "tokenizer.l" | ||||
|  | ||||
|  | ||||
| #line 335 "lex.linguist_yy.h" | ||||
| #undef linguist_yyIN_HEADER | ||||
| #endif /* linguist_yyHEADER_H */ | ||||
							
								
								
									
ext/linguist/linguist.c (75 lines, new file)
							| @@ -0,0 +1,75 @@ | ||||
| #include "ruby.h" | ||||
| #include "linguist.h" | ||||
| #include "lex.linguist_yy.h" | ||||
|  | ||||
| // Anything longer is unlikely to be useful. | ||||
| #define MAX_TOKEN_LEN 32 | ||||
|  | ||||
| int linguist_yywrap(yyscan_t yyscanner) { | ||||
| 	return 1; | ||||
| } | ||||
|  | ||||
| static VALUE rb_tokenizer_extract_tokens(VALUE self, VALUE rb_data) { | ||||
| 	YY_BUFFER_STATE buf; | ||||
| 	yyscan_t scanner; | ||||
| 	struct tokenizer_extra extra; | ||||
| 	VALUE ary, s; | ||||
| 	long len; | ||||
| 	int r; | ||||
|  | ||||
| 	Check_Type(rb_data, T_STRING); | ||||
|  | ||||
| 	len = RSTRING_LEN(rb_data); | ||||
| 	if (len > 100000) | ||||
| 		len = 100000; | ||||
|  | ||||
| 	linguist_yylex_init_extra(&extra, &scanner); | ||||
| 	buf = linguist_yy_scan_bytes(RSTRING_PTR(rb_data), (int) len, scanner); | ||||
|  | ||||
| 	ary = rb_ary_new(); | ||||
| 	do { | ||||
| 		extra.type = NO_ACTION; | ||||
| 		extra.token = NULL; | ||||
| 		r = linguist_yylex(scanner); | ||||
| 		switch (extra.type) { | ||||
| 		case NO_ACTION: | ||||
| 			break; | ||||
| 		case REGULAR_TOKEN: | ||||
| 			len = strlen(extra.token); | ||||
| 			if (len <= MAX_TOKEN_LEN) | ||||
| 				rb_ary_push(ary, rb_str_new(extra.token, len)); | ||||
| 			free(extra.token); | ||||
| 			break; | ||||
| 		case SHEBANG_TOKEN: | ||||
| 			len = strlen(extra.token); | ||||
| 			if (len <= MAX_TOKEN_LEN) { | ||||
| 				s = rb_str_new2("SHEBANG#!"); | ||||
| 				rb_str_cat(s, extra.token, len); | ||||
| 				rb_ary_push(ary, s); | ||||
| 			} | ||||
| 			free(extra.token); | ||||
| 			break; | ||||
| 		case SGML_TOKEN: | ||||
| 			len = strlen(extra.token); | ||||
| 			if (len <= MAX_TOKEN_LEN) { | ||||
| 				s = rb_str_new(extra.token, len); | ||||
| 				rb_str_cat2(s, ">"); | ||||
| 				rb_ary_push(ary, s); | ||||
| 			} | ||||
| 			free(extra.token); | ||||
| 			break; | ||||
| 		} | ||||
| 	} while (r); | ||||
|  | ||||
| 	linguist_yy_delete_buffer(buf, scanner); | ||||
| 	linguist_yylex_destroy(scanner); | ||||
|  | ||||
| 	return ary; | ||||
| } | ||||
|  | ||||
| __attribute__((visibility("default"))) void Init_linguist() { | ||||
| 	VALUE rb_mLinguist = rb_define_module("Linguist"); | ||||
| 	VALUE rb_cTokenizer = rb_define_class_under(rb_mLinguist, "Tokenizer", rb_cObject); | ||||
|  | ||||
| 	rb_define_method(rb_cTokenizer, "extract_tokens", rb_tokenizer_extract_tokens, 1); | ||||
| } | ||||
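`Init_linguist` registers the scanner as `Linguist::Tokenizer#extract_tokens`. A minimal usage sketch, assuming the extension has been compiled; the sample input and expected output are taken from the doc comments of the pure-Ruby tokenizer this diff removes further down.

```ruby
require 'linguist/linguist'  # the compiled C extension

Linguist::Tokenizer.new.extract_tokens("printf('Hello')")
# => ["printf", "(", ")"]
# String contents are skipped, tokens longer than MAX_TOKEN_LEN (32 bytes)
# are dropped, and only the first 100,000 bytes of input are scanned.
```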
							
								
								
									
ext/linguist/linguist.h (11 lines, new file)
							| @@ -0,0 +1,11 @@ | ||||
| enum tokenizer_type { | ||||
|   NO_ACTION, | ||||
|   REGULAR_TOKEN, | ||||
|   SHEBANG_TOKEN, | ||||
|   SGML_TOKEN, | ||||
| }; | ||||
|  | ||||
| struct tokenizer_extra { | ||||
|   char *token; | ||||
|   enum tokenizer_type type; | ||||
| }; | ||||
							
								
								
									
ext/linguist/tokenizer.l (119 lines, new file)
							| @@ -0,0 +1,119 @@ | ||||
| %{ | ||||
|  | ||||
| #include "linguist.h" | ||||
|  | ||||
| #define feed_token(tok, typ) do { \ | ||||
|     yyextra->token = (tok); \ | ||||
|     yyextra->type = (typ); \ | ||||
|   } while (0) | ||||
|  | ||||
| #define eat_until_eol() do { \ | ||||
|     int c; \ | ||||
|     while ((c = input(yyscanner)) != '\n' && c != EOF && c); \ | ||||
|     if (c == EOF || !c) \ | ||||
|       return 0; \ | ||||
|   } while (0) | ||||
|  | ||||
| #define eat_until_unescaped(q) do { \ | ||||
|     int c; \ | ||||
|     while ((c = input(yyscanner)) != EOF && c) { \ | ||||
|       if (c == '\n') \ | ||||
|         break; \ | ||||
|       if (c == '\\') { \ | ||||
|         c = input(yyscanner); \ | ||||
|         if (c == EOF || !c) \ | ||||
|           return 0; \ | ||||
|       } else if (c == q) \ | ||||
|         break; \ | ||||
|     } \ | ||||
|     if (c == EOF || !c) \ | ||||
|       return 0; \ | ||||
|   } while (0) | ||||
|  | ||||
| %} | ||||
|  | ||||
| %option never-interactive yywrap reentrant nounput warn nodefault header-file="lex.linguist_yy.h" extra-type="struct tokenizer_extra *" prefix="linguist_yy" | ||||
| %x sgml c_comment xml_comment haskell_comment ocaml_comment python_dcomment python_scomment | ||||
|  | ||||
| %% | ||||
|  | ||||
| ^#![ \t]*([[:alnum:]_\/]*\/)?env([ \t]+([^ \t=]*=[^ \t]*))*[ \t]+[[:alpha:]_]+ { | ||||
|     const char *off = strrchr(yytext, ' '); | ||||
|     if (!off) | ||||
|       off = yytext; | ||||
|     else | ||||
|       ++off; | ||||
|     feed_token(strdup(off), SHEBANG_TOKEN); | ||||
|     eat_until_eol(); | ||||
|     return 1; | ||||
|   } | ||||
|  | ||||
| ^#![ \t]*[[:alpha:]_\/]+ { | ||||
|     const char *off = strrchr(yytext, '/'); | ||||
|     if (!off) | ||||
|       off = yytext; | ||||
|     else | ||||
|       ++off; | ||||
|     if (strcmp(off, "env") == 0) { | ||||
|       eat_until_eol(); | ||||
|     } else { | ||||
|       feed_token(strdup(off), SHEBANG_TOKEN); | ||||
|       eat_until_eol(); | ||||
|       return 1; | ||||
|     } | ||||
|   } | ||||
|  | ||||
| ^[ \t]*(\/\/|--|\#|%|\")" ".*   { /* nothing */ } | ||||
|  | ||||
| "/*"                              { BEGIN(c_comment); } | ||||
|   /* See below for xml_comment start. */ | ||||
| "{-"                              { BEGIN(haskell_comment); } | ||||
| "(*"                              { BEGIN(ocaml_comment); } | ||||
| "\"\"\""                          { BEGIN(python_dcomment); } | ||||
| "'''"                             { BEGIN(python_scomment); } | ||||
|  | ||||
| <c_comment,xml_comment,haskell_comment,ocaml_comment,python_dcomment,python_scomment>.|\n { /* nothing */ } | ||||
| <c_comment>"*/"                   { BEGIN(INITIAL); } | ||||
| <xml_comment>"-->"                { BEGIN(INITIAL); } | ||||
| <haskell_comment>"-}"             { BEGIN(INITIAL); } | ||||
| <ocaml_comment>"*)"               { BEGIN(INITIAL); } | ||||
| <python_dcomment>"\"\"\""         { BEGIN(INITIAL); } | ||||
| <python_scomment>"'''"            { BEGIN(INITIAL); } | ||||
|  | ||||
| \"\"|''                           { /* nothing */ } | ||||
| \"                                { eat_until_unescaped('"'); } | ||||
| '                                 { eat_until_unescaped('\''); } | ||||
| (0x[0-9a-fA-F]([0-9a-fA-F]|\.)*|[0-9]([0-9]|\.)*)([uU][lL]{0,2}|([eE][-+][0-9]*)?[fFlL]*) { /* nothing */ } | ||||
| \<[[:alnum:]_!./?-]+              { | ||||
|     if (strcmp(yytext, "<!--") == 0) { | ||||
|      BEGIN(xml_comment); | ||||
|     } else { | ||||
|       feed_token(strdup(yytext), SGML_TOKEN); | ||||
|       BEGIN(sgml); | ||||
|       return 1; | ||||
|     } | ||||
|   } | ||||
| <sgml>[[:alnum:]_]+=\"            { feed_token(strndup(yytext, strlen(yytext) - 1), REGULAR_TOKEN); eat_until_unescaped('"'); return 1; } | ||||
| <sgml>[[:alnum:]_]+='             { feed_token(strndup(yytext, strlen(yytext) - 1), REGULAR_TOKEN); eat_until_unescaped('\''); return 1; } | ||||
| <sgml>[[:alnum:]_]+=[[:alnum:]_]* { feed_token(strdup(yytext), REGULAR_TOKEN); *(strchr(yyextra->token, '=') + 1) = 0; return 1; } | ||||
| <sgml>[[:alnum:]_]+               { feed_token(strdup(yytext), REGULAR_TOKEN); return 1; } | ||||
| <sgml>\>                          { BEGIN(INITIAL); } | ||||
| <sgml>.|\n                        { /* nothing */ } | ||||
| ;|\{|\}|\(|\)|\[|\]               { feed_token(strdup(yytext), REGULAR_TOKEN); return 1; } | ||||
| [[:alnum:]_.@#/*]+                { | ||||
|     if (strncmp(yytext, "/*", 2) == 0) { | ||||
|       if (strlen(yytext) >= 4 && strcmp(yytext + strlen(yytext) - 2, "*/") == 0) { | ||||
|         /* nothing */ | ||||
|       } else { | ||||
|         BEGIN(c_comment); | ||||
|       } | ||||
|     } else { | ||||
|       feed_token(strdup(yytext), REGULAR_TOKEN); | ||||
|       return 1; | ||||
|     } | ||||
|   } | ||||
| \<\<?|\+|\-|\*|\/|%|&&?|\|\|?     { feed_token(strdup(yytext), REGULAR_TOKEN); return 1; } | ||||
| .|\n                              { /* nothing */ } | ||||
|  | ||||
| %% | ||||
|  | ||||
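The two shebang rules above normalize an interpreter name out of the first line; `linguist.c` then prefixes it with `SHEBANG#!`. Hedged examples of the intended mapping, adapted from the doc comments of the removed Ruby `extract_shebang`:

```ruby
t = Linguist::Tokenizer.new
t.extract_tokens("#!/usr/bin/ruby")                    # => ["SHEBANG#!ruby"]
t.extract_tokens("#!/usr/bin/env node")                # => ["SHEBANG#!node"]
t.extract_tokens("#!/usr/bin/env A=B foo=bar awk -f")  # => ["SHEBANG#!awk"]
```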
| @@ -10,8 +10,9 @@ Gem::Specification.new do |s| | ||||
|   s.homepage = "https://github.com/github/linguist" | ||||
|   s.license  = "MIT" | ||||
|  | ||||
|   s.files = Dir['lib/**/*'] + Dir['grammars/*'] + ['LICENSE'] | ||||
|   s.files = Dir['lib/**/*'] + Dir['ext/**/*'] + Dir['grammars/*'] + ['LICENSE'] | ||||
|   s.executables = ['linguist', 'git-linguist'] | ||||
|   s.extensions = ['ext/linguist/extconf.rb'] | ||||
|  | ||||
|   s.add_dependency 'charlock_holmes', '~> 0.7.5' | ||||
|   s.add_dependency 'escape_utils',    '~> 1.1.0' | ||||
| @@ -19,6 +20,7 @@ Gem::Specification.new do |s| | ||||
|   s.add_dependency 'rugged',          '>= 0.25.1' | ||||
|  | ||||
|   s.add_development_dependency 'minitest', '>= 5.0' | ||||
|   s.add_development_dependency 'rake-compiler', '~> 0.9' | ||||
|   s.add_development_dependency 'mocha' | ||||
|   s.add_development_dependency 'plist', '~>3.1' | ||||
|   s.add_development_dependency 'pry' | ||||
|   | ||||
							
								
								
									
grammars.yml (12 changes)
							| @@ -1,4 +1,3 @@ | ||||
| --- | ||||
| https://bitbucket.org/Clams/sublimesystemverilog/get/default.tar.gz: | ||||
| - source.systemverilog | ||||
| - source.ucfconstraints | ||||
| @@ -128,6 +127,9 @@ vendor/grammars/SublimePuppet: | ||||
| - source.puppet | ||||
| vendor/grammars/SublimeXtend: | ||||
| - source.xtend | ||||
| vendor/grammars/Syntax-highlighting-for-PostCSS: | ||||
| - source.css.postcss.sugarss | ||||
| - source.postcss | ||||
| vendor/grammars/TLA: | ||||
| - source.tla | ||||
| vendor/grammars/TXL: | ||||
| @@ -341,6 +343,8 @@ vendor/grammars/java.tmbundle: | ||||
| - source.java-properties | ||||
| - text.html.jsp | ||||
| - text.junit-test-report | ||||
| vendor/grammars/javadoc.tmbundle: | ||||
| - text.html.javadoc | ||||
| vendor/grammars/javascript-objective-j.tmbundle: | ||||
| - source.js.objj | ||||
| vendor/grammars/jflex.tmbundle: | ||||
| @@ -387,6 +391,8 @@ vendor/grammars/language-csound: | ||||
| - source.csound-score | ||||
| vendor/grammars/language-css: | ||||
| - source.css | ||||
| vendor/grammars/language-cwl: | ||||
| - source.cwl | ||||
| vendor/grammars/language-emacs-lisp: | ||||
| - source.emacs.lisp | ||||
| vendor/grammars/language-fontforge: | ||||
| @@ -501,6 +507,8 @@ vendor/grammars/language-yaml: | ||||
| - source.yaml | ||||
| vendor/grammars/language-yang: | ||||
| - source.yang | ||||
| vendor/grammars/language-yara: | ||||
| - source.yara | ||||
| vendor/grammars/latex.tmbundle: | ||||
| - text.bibtex | ||||
| - text.log.latex | ||||
| @@ -567,7 +575,7 @@ vendor/grammars/opa.tmbundle: | ||||
| - source.opa | ||||
| vendor/grammars/openscad.tmbundle: | ||||
| - source.scad | ||||
| vendor/grammars/oz-tmbundle/Syntaxes/Oz.tmLanguage: | ||||
| vendor/grammars/oz-tmbundle: | ||||
| - source.oz | ||||
| vendor/grammars/parrot: | ||||
| - source.parrot.pir | ||||
|   | ||||
| @@ -275,10 +275,8 @@ module Linguist | ||||
|           # also--importantly--without having to duplicate many (potentially | ||||
|           # large) strings. | ||||
|           begin | ||||
|             encoded_newlines = ["\r\n", "\r", "\n"]. | ||||
|               map { |nl| nl.encode(ruby_encoding, "ASCII-8BIT").force_encoding(data.encoding) } | ||||
|  | ||||
|             data.split(Regexp.union(encoded_newlines), -1) | ||||
|              | ||||
|             data.split(encoded_newlines_re, -1) | ||||
|           rescue Encoding::ConverterNotFoundError | ||||
|             # The data is not splittable in the detected encoding.  Assume it's | ||||
|             # one big line. | ||||
| @@ -289,6 +287,51 @@ module Linguist | ||||
|         end | ||||
|     end | ||||
|  | ||||
|     def encoded_newlines_re | ||||
|       @encoded_newlines_re ||= Regexp.union(["\r\n", "\r", "\n"]. | ||||
|                                               map { |nl| nl.encode(ruby_encoding, "ASCII-8BIT").force_encoding(data.encoding) }) | ||||
|  | ||||
|     end | ||||
|  | ||||
|     def first_lines(n) | ||||
|       return lines[0...n] if defined? @lines | ||||
|       return [] unless viewable? && data | ||||
|  | ||||
|       i, c = 0, 0 | ||||
|       while c < n && j = data.index(encoded_newlines_re, i) | ||||
|         i = j + $&.length | ||||
|         c += 1 | ||||
|       end | ||||
|       data[0...i].split(encoded_newlines_re, -1) | ||||
|     end | ||||
|  | ||||
|     def last_lines(n) | ||||
|       if defined? @lines | ||||
|         if n >= @lines.length | ||||
|           @lines | ||||
|         else | ||||
|           lines[-n..-1] | ||||
|         end | ||||
|       end | ||||
|       return [] unless viewable? && data | ||||
|  | ||||
|       no_eol = true | ||||
|       i, c = data.length, 0 | ||||
|       k = i | ||||
|       while c < n && j = data.rindex(encoded_newlines_re, i - 1) | ||||
|         if c == 0 && j + $&.length == i | ||||
|           no_eol = false | ||||
|           n += 1 | ||||
|         end | ||||
|         i = j | ||||
|         k = j + $&.length | ||||
|         c += 1 | ||||
|       end | ||||
|       r = data[k..-1].split(encoded_newlines_re, -1) | ||||
|       r.pop if !no_eol | ||||
|       r | ||||
|     end | ||||
|  | ||||
|     # Public: Get number of lines of code | ||||
|     # | ||||
|     # Requires Blob#data | ||||
|   | ||||
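`first_lines` and `last_lines` exist so callers (such as the modeline strategy below) can look at a handful of lines without splitting the entire blob. A simplified sketch of the forward-scan idea, using a plain `"\n"` in place of the encoding-aware `encoded_newlines_re`:

```ruby
# Scan for the first n newlines with String#index, then split only that
# prefix instead of the whole file.
def first_lines_simplified(data, n)
  i = 0
  n.times do
    j = data.index("\n", i) or break
    i = j + 1
  end
  data[0...i].split("\n")
end

first_lines_simplified("a\nb\nc\n", 2)  # => ["a", "b"]
```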
| @@ -3,6 +3,8 @@ require 'linguist/tokenizer' | ||||
| module Linguist | ||||
|   # Language bayesian classifier. | ||||
|   class Classifier | ||||
|     CLASSIFIER_CONSIDER_BYTES = 50 * 1024 | ||||
|  | ||||
|     # Public: Use the classifier to detect language of the blob. | ||||
|     # | ||||
|     # blob               - An object that quacks like a blob. | ||||
| @@ -17,7 +19,7 @@ module Linguist | ||||
|     # Returns an Array of Language objects, most probable first. | ||||
|     def self.call(blob, possible_languages) | ||||
|       language_names = possible_languages.map(&:name) | ||||
|       classify(Samples.cache, blob.data, language_names).map do |name, _| | ||||
|       classify(Samples.cache, blob.data[0...CLASSIFIER_CONSIDER_BYTES], language_names).map do |name, _| | ||||
|         Language[name] # Return the actual Language objects | ||||
|       end | ||||
|     end | ||||
|   | ||||
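`CLASSIFIER_CONSIDER_BYTES` caps how much of the blob the Bayesian classifier tokenizes. One nuance worth flagging: Ruby's `String#[]` slices by character, so the 50 KiB figure is exact only for single-byte encodings. A quick hedged illustration:

```ruby
CLASSIFIER_CONSIDER_BYTES = 50 * 1024

data = "x" * 200_000  # ASCII: one byte per character
data[0...CLASSIFIER_CONSIDER_BYTES].bytesize
# => 51200; only this prefix reaches the classifier
```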
| @@ -23,21 +23,21 @@ module Linguist | ||||
|     # | ||||
|     # Returns a String like '100644' | ||||
|     def mode | ||||
|       File.stat(@fullpath).mode.to_s(8) | ||||
|       @mode ||= File.stat(@fullpath).mode.to_s(8) | ||||
|     end | ||||
|  | ||||
|     # Public: Read file contents. | ||||
|     # | ||||
|     # Returns a String. | ||||
|     def data | ||||
|       File.read(@fullpath) | ||||
|       @data ||= File.read(@fullpath) | ||||
|     end | ||||
|  | ||||
|     # Public: Get byte size | ||||
|     # | ||||
|     # Returns an Integer. | ||||
|     def size | ||||
|       File.size(@fullpath) | ||||
|       @size ||= File.size(@fullpath) | ||||
|     end | ||||
|   end | ||||
| end | ||||
|   | ||||
| @@ -52,6 +52,8 @@ module Linguist | ||||
|     # Return true or false | ||||
|     def generated? | ||||
|       xcode_file? || | ||||
|       cocoapods? || | ||||
|       carthage_build? || | ||||
|       generated_net_designer_file? || | ||||
|       generated_net_specflow_feature_file? || | ||||
|       composer_lock? || | ||||
| @@ -95,6 +97,20 @@ module Linguist | ||||
|       ['.nib', '.xcworkspacedata', '.xcuserstate'].include?(extname) | ||||
|     end | ||||
|  | ||||
|     # Internal: Is the blob part of Pods/, which contains dependencies not meant for humans in pull requests. | ||||
|     # | ||||
|     # Returns true or false. | ||||
|     def cocoapods? | ||||
|       !!name.match(/(^Pods|\/Pods)\//) | ||||
|     end | ||||
|  | ||||
|     # Internal: Is the blob part of Carthage/Build/, which contains dependencies not meant for humans in pull requests. | ||||
|     # | ||||
|     # Returns true or false. | ||||
|     def carthage_build? | ||||
|       !!name.match(/(^|\/)Carthage\/Build\//) | ||||
|     end | ||||
|  | ||||
|     # Internal: Is the blob minified files? | ||||
|     # | ||||
|     # Consider a file minified if the average line length is | ||||
|   | ||||
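Both new predicates match the dependency directories anywhere in the path, not only at the repository root. Hedged spot checks (the regexes are copied verbatim from the diff; the example paths are hypothetical):

```ruby
cocoapods      = /(^Pods|\/Pods)\//
carthage_build = /(^|\/)Carthage\/Build\//

cocoapods.match?("Pods/AFNetworking/AFNetworking.podspec")                  # => true
cocoapods.match?("ios/Pods/Manifest.lock")                                  # => true
carthage_build.match?("Carthage/Build/iOS/Alamofire.framework/Info.plist")  # => true
```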
| @@ -1,6 +1,8 @@ | ||||
| module Linguist | ||||
|   # A collection of simple heuristics that can be used to better analyze languages. | ||||
|   class Heuristics | ||||
|     HEURISTICS_CONSIDER_BYTES = 50 * 1024 | ||||
|  | ||||
|     # Public: Use heuristics to detect language of the blob. | ||||
|     # | ||||
|     # blob               - An object that quacks like a blob. | ||||
| @@ -14,7 +16,7 @@ module Linguist | ||||
|     # | ||||
|     # Returns an Array of languages, or empty if none matched or were inconclusive. | ||||
|     def self.call(blob, candidates) | ||||
|       data = blob.data | ||||
|       data = blob.data[0...HEURISTICS_CONSIDER_BYTES] | ||||
|  | ||||
|       @heuristics.each do |heuristic| | ||||
|         if heuristic.matches?(blob.name, candidates) | ||||
| @@ -71,7 +73,17 @@ module Linguist | ||||
|     end | ||||
|  | ||||
|     # Common heuristics | ||||
|     CPlusPlusRegex = Regexp.union( | ||||
|         /^\s*#\s*include <(cstdint|string|vector|map|list|array|bitset|queue|stack|forward_list|unordered_map|unordered_set|(i|o|io)stream)>/, | ||||
|         /^\s*template\s*</, | ||||
|         /^[ \t]*try/, | ||||
|         /^[ \t]*catch\s*\(/, | ||||
|         /^[ \t]*(class|(using[ \t]+)?namespace)\s+\w+/, | ||||
|         /^[ \t]*(private|public|protected):$/, | ||||
|         /std::\w+/) | ||||
|     ObjectiveCRegex = /^\s*(@(interface|class|protocol|property|end|synchronised|selector|implementation)\b|#import\s+.+\.h[">])/ | ||||
|     Perl5Regex = /\buse\s+(?:strict\b|v?5\.)/ | ||||
|     Perl6Regex = /^\s*(?:use\s+v6\b|\bmodule\b|\b(?:my\s+)?class\b)/ | ||||
|  | ||||
|     disambiguate ".as" do |data| | ||||
|       if /^\s*(package\s+[a-z0-9_\.]+|import\s+[a-zA-Z0-9_\.]+;|class\s+[A-Za-z0-9_]+\s+extends\s+[A-Za-z0-9_]+)/.match(data) | ||||
| @@ -219,8 +231,7 @@ module Linguist | ||||
|     disambiguate ".h" do |data| | ||||
|       if ObjectiveCRegex.match(data) | ||||
|         Language["Objective-C"] | ||||
|       elsif (/^\s*#\s*include <(cstdint|string|vector|map|list|array|bitset|queue|stack|forward_list|unordered_map|unordered_set|(i|o|io)stream)>/.match(data) || | ||||
|         /^\s*template\s*</.match(data) || /^[ \t]*try/.match(data) || /^[ \t]*catch\s*\(/.match(data) || /^[ \t]*(class|(using[ \t]+)?namespace)\s+\w+/.match(data) || /^[ \t]*(private|public|protected):$/.match(data) || /std::\w+/.match(data)) | ||||
|       elsif CPlusPlusRegex.match(data) | ||||
|         Language["C++"] | ||||
|       end | ||||
|     end | ||||
| @@ -350,17 +361,17 @@ module Linguist | ||||
|     disambiguate ".pl" do |data| | ||||
|       if /^[^#]*:-/.match(data) | ||||
|         Language["Prolog"] | ||||
|       elsif /use strict|use\s+v?5\./.match(data) | ||||
|       elsif Perl5Regex.match(data) | ||||
|         Language["Perl"] | ||||
|       elsif /^(use v6|(my )?class|module)/.match(data) | ||||
|       elsif Perl6Regex.match(data) | ||||
|         Language["Perl 6"] | ||||
|       end | ||||
|     end | ||||
|  | ||||
|     disambiguate ".pm" do |data| | ||||
|       if /\buse\s+(?:strict\b|v?5\.)/.match(data) | ||||
|       if Perl5Regex.match(data) | ||||
|         Language["Perl"] | ||||
|       elsif /^\s*(?:use\s+v6\s*;|(?:\bmy\s+)?class|module)\b/.match(data) | ||||
|       elsif Perl6Regex.match(data) | ||||
|         Language["Perl 6"] | ||||
|       elsif /^\s*\/\* XPM \*\//.match(data) | ||||
|         Language["XPM"] | ||||
| @@ -368,7 +379,7 @@ module Linguist | ||||
|     end | ||||
|  | ||||
|     disambiguate ".pro" do |data| | ||||
|       if /^[^#]+:-/.match(data) | ||||
|       if /^[^\[#]+:-/.match(data) | ||||
|         Language["Prolog"] | ||||
|       elsif data.include?("last_client=") | ||||
|         Language["INI"] | ||||
| @@ -450,12 +461,12 @@ module Linguist | ||||
|     end | ||||
|      | ||||
|     disambiguate ".t" do |data| | ||||
|       if /^\s*%[ \t]+|^\s*var\s+\w+\s*:=\s*\w+/.match(data) | ||||
|         Language["Turing"] | ||||
|       elsif /^\s*(?:use\s+v6\s*;|\bmodule\b|\b(?:my\s+)?class\b)/.match(data) | ||||
|         Language["Perl 6"] | ||||
|       elsif /\buse\s+(?:strict\b|v?5\.)/.match(data) | ||||
|       if Perl5Regex.match(data) | ||||
|         Language["Perl"] | ||||
|       elsif Perl6Regex.match(data) | ||||
|         Language["Perl 6"] | ||||
|       elsif /^\s*%[ \t]+|^\s*var\s+\w+\s*:=\s*\w+/.match(data) | ||||
|         Language["Turing"] | ||||
|       end | ||||
|     end | ||||
|      | ||||
|   | ||||
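Hoisting the Perl patterns into `Perl5Regex` and `Perl6Regex` gives the `.pl`, `.pm`, and `.t` disambiguators one shared definition each. Hedged spot checks of the shared patterns (the inputs are hypothetical):

```ruby
Perl5Regex = /\buse\s+(?:strict\b|v?5\.)/
Perl6Regex = /^\s*(?:use\s+v6\b|\bmodule\b|\b(?:my\s+)?class\b)/

Perl5Regex.match?("use strict; use warnings;")  # => true
Perl6Regex.match?("use v6;")                    # => true
Perl5Regex.match?("use v6;")                    # => false, so the Perl 6 branch wins
```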
| @@ -110,7 +110,7 @@ module Linguist | ||||
|     # Returns the Language or nil if none was found. | ||||
|     def self.find_by_name(name) | ||||
|       return nil if !name.is_a?(String) || name.to_s.empty? | ||||
|       name && (@name_index[name.downcase] || @name_index[name.split(',').first.downcase]) | ||||
|       name && (@name_index[name.downcase] || @name_index[name.split(',', 2).first.downcase]) | ||||
|     end | ||||
|  | ||||
|     # Public: Look up Language by one of its aliases. | ||||
| @@ -125,7 +125,7 @@ module Linguist | ||||
|     # Returns the Language or nil if none was found. | ||||
|     def self.find_by_alias(name) | ||||
|       return nil if !name.is_a?(String) || name.to_s.empty? | ||||
|       name && (@alias_index[name.downcase] || @alias_index[name.split(',').first.downcase]) | ||||
|       name && (@alias_index[name.downcase] || @alias_index[name.split(',', 2).first.downcase]) | ||||
|     end | ||||
|  | ||||
|     # Public: Look up Languages by filename. | ||||
| @@ -219,10 +219,7 @@ module Linguist | ||||
|       lang = @index[name.downcase] | ||||
|       return lang if lang | ||||
|  | ||||
|       name = name.split(',').first | ||||
|       return nil if name.to_s.empty? | ||||
|  | ||||
|       @index[name.downcase] | ||||
|       @index[name.split(',', 2).first.downcase] | ||||
|     end | ||||
|  | ||||
|     # Public: A List of popular languages | ||||
|   | ||||
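`split(',', 2)` caps the number of fragments at two, so only the first comma is scanned for; since every caller takes `.first`, the result is unchanged while less work is done. A hedged illustration:

```ruby
"Linux Kernel Module,Linux".split(',').first     # => "Linux Kernel Module"
"Linux Kernel Module,Linux".split(',', 2).first  # => "Linux Kernel Module"
"A,B,C".split(',', 2)  # => ["A", "B,C"]; .first is "A" either way
```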
| @@ -807,6 +807,19 @@ Common Lisp: | ||||
|   codemirror_mode: commonlisp | ||||
|   codemirror_mime_type: text/x-common-lisp | ||||
|   language_id: 66 | ||||
| Common Workflow Language: | ||||
|   alias: cwl | ||||
|   type: programming | ||||
|   ace_mode: yaml | ||||
|   codemirror_mode: yaml | ||||
|   codemirror_mime_type: text/x-yaml | ||||
|   extensions: | ||||
|   - ".cwl" | ||||
|   interpreters: | ||||
|   - cwl-runner | ||||
|   color: "#B5314C" | ||||
|   tm_scope: source.cwl | ||||
|   language_id: 988547172 | ||||
| Component Pascal: | ||||
|   type: programming | ||||
|   color: "#B0CE4E" | ||||
| @@ -1147,7 +1160,7 @@ Ecere Projects: | ||||
| Edje Data Collection: | ||||
|   type: data | ||||
|   extensions: | ||||
|    - ".edc" | ||||
|   - ".edc" | ||||
|   tm_scope: source.json | ||||
|   ace_mode: json | ||||
|   codemirror_mode: javascript | ||||
| @@ -2245,9 +2258,9 @@ Kotlin: | ||||
|   language_id: 189 | ||||
| LFE: | ||||
|   type: programming | ||||
|   color: "#4C3023" | ||||
|   extensions: | ||||
|   - ".lfe" | ||||
|   group: Erlang | ||||
|   tm_scope: source.lisp | ||||
|   ace_mode: lisp | ||||
|   codemirror_mode: commonlisp | ||||
| @@ -3427,6 +3440,14 @@ Pony: | ||||
|   tm_scope: source.pony | ||||
|   ace_mode: text | ||||
|   language_id: 290 | ||||
| PostCSS: | ||||
|   type: markup | ||||
|   tm_scope: source.postcss | ||||
|   group: CSS | ||||
|   extensions: | ||||
|   - ".pcss" | ||||
|   ace_mode: text | ||||
|   language_id: 262764437 | ||||
| PostScript: | ||||
|   type: markup | ||||
|   color: "#da291c" | ||||
| @@ -3592,6 +3613,7 @@ Python: | ||||
|   - ".gclient" | ||||
|   - BUCK | ||||
|   - BUILD | ||||
|   - BUILD.bazel | ||||
|   - SConscript | ||||
|   - SConstruct | ||||
|   - Snakefile | ||||
| @@ -4413,6 +4435,14 @@ Sublime Text Config: | ||||
|   - ".sublime_metrics" | ||||
|   - ".sublime_session" | ||||
|   language_id: 423 | ||||
| SugarSS: | ||||
|   type: markup | ||||
|   tm_scope: source.css.postcss.sugarss | ||||
|   group: CSS | ||||
|   extensions: | ||||
|   - ".sss" | ||||
|   ace_mode: text | ||||
|   language_id: 826404698 | ||||
| SuperCollider: | ||||
|   type: programming | ||||
|   color: "#46390b" | ||||
| @@ -5119,6 +5149,14 @@ YANG: | ||||
|   tm_scope: source.yang | ||||
|   ace_mode: text | ||||
|   language_id: 408 | ||||
| YARA: | ||||
|   type: data | ||||
|   ace_mode: text | ||||
|   extensions: | ||||
|   - ".yar" | ||||
|   - ".yara" | ||||
|   tm_scope: source.yara | ||||
|   language_id: 805122868 | ||||
| Yacc: | ||||
|   type: programming | ||||
|   extensions: | ||||
|   | ||||
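With these entries in `languages.yml`, the new languages resolve through the usual lookup paths. Hedged examples built from the alias and ace_mode values in the diff:

```ruby
Linguist::Language['Common Workflow Language'].ace_mode  # => "yaml"
Linguist::Language.find_by_alias('cwl').name             # => "Common Workflow Language"
```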
| @@ -109,8 +109,8 @@ module Linguist | ||||
|       # Returns an Array with one Language if the blob has a Vim or Emacs modeline | ||||
|       # that matches a Language name or alias. Returns an empty array if no match. | ||||
|       def self.call(blob, _ = nil) | ||||
|         header = blob.lines.first(SEARCH_SCOPE).join("\n") | ||||
|         footer = blob.lines.last(SEARCH_SCOPE).join("\n") | ||||
|         header = blob.first_lines(SEARCH_SCOPE).join("\n") | ||||
|         footer = blob.last_lines(SEARCH_SCOPE).join("\n") | ||||
|         Array(Language.find_by_alias(modeline(header + footer))) | ||||
|       end | ||||
|  | ||||
|   | ||||
| @@ -1,4 +1,5 @@ | ||||
| require 'strscan' | ||||
| require 'linguist/linguist' | ||||
|  | ||||
| module Linguist | ||||
|   # Generic programming language tokenizer. | ||||
| @@ -15,191 +16,5 @@ module Linguist | ||||
|     def self.tokenize(data) | ||||
|       new.extract_tokens(data) | ||||
|     end | ||||
|  | ||||
|     # Read up to 100KB | ||||
|     BYTE_LIMIT = 100_000 | ||||
|  | ||||
|     # Start state on token, ignore anything till the next newline | ||||
|     SINGLE_LINE_COMMENTS = [ | ||||
|       '//', # C | ||||
|       '--', # Ada, Haskell, AppleScript | ||||
|       '#',  # Ruby | ||||
|       '%',  # Tex | ||||
|       '"',  # Vim | ||||
|     ] | ||||
|  | ||||
|     # Start state on opening token, ignore anything until the closing | ||||
|     # token is reached. | ||||
|     MULTI_LINE_COMMENTS = [ | ||||
|       ['/*', '*/'],    # C | ||||
|       ['<!--', '-->'], # XML | ||||
|       ['{-', '-}'],    # Haskell | ||||
|       ['(*', '*)'],    # Coq | ||||
|       ['"""', '"""'],  # Python | ||||
|       ["'''", "'''"]   # Python | ||||
|     ] | ||||
|  | ||||
|     START_SINGLE_LINE_COMMENT =  Regexp.compile(SINGLE_LINE_COMMENTS.map { |c| | ||||
|       "\s*#{Regexp.escape(c)} " | ||||
|     }.join("|")) | ||||
|  | ||||
|     START_MULTI_LINE_COMMENT =  Regexp.compile(MULTI_LINE_COMMENTS.map { |c| | ||||
|       Regexp.escape(c[0]) | ||||
|     }.join("|")) | ||||
|  | ||||
|     # Internal: Extract generic tokens from data. | ||||
|     # | ||||
|     # data - String to scan. | ||||
|     # | ||||
|     # Examples | ||||
|     # | ||||
|     #   extract_tokens("printf('Hello')") | ||||
|     #   # => ['printf', '(', ')'] | ||||
|     # | ||||
|     # Returns Array of token Strings. | ||||
|     def extract_tokens(data) | ||||
|       s = StringScanner.new(data) | ||||
|  | ||||
|       tokens = [] | ||||
|       until s.eos? | ||||
|         break if s.pos >= BYTE_LIMIT | ||||
|  | ||||
|         if token = s.scan(/^#!.+$/) | ||||
|           if name = extract_shebang(token) | ||||
|             tokens << "SHEBANG#!#{name}" | ||||
|           end | ||||
|  | ||||
|         # Single line comment | ||||
|         elsif s.beginning_of_line? && token = s.scan(START_SINGLE_LINE_COMMENT) | ||||
|           # tokens << token.strip | ||||
|           s.skip_until(/\n|\Z/) | ||||
|  | ||||
|         # Multiline comments | ||||
|         elsif token = s.scan(START_MULTI_LINE_COMMENT) | ||||
|           # tokens << token | ||||
|           close_token = MULTI_LINE_COMMENTS.assoc(token)[1] | ||||
|           s.skip_until(Regexp.compile(Regexp.escape(close_token))) | ||||
|           # tokens << close_token | ||||
|  | ||||
|         # Skip single or double quoted strings | ||||
|         elsif s.scan(/"/) | ||||
|           if s.peek(1) == "\"" | ||||
|             s.getch | ||||
|           else | ||||
|             s.skip_until(/(?<!\\)"/) | ||||
|           end | ||||
|         elsif s.scan(/'/) | ||||
|           if s.peek(1) == "'" | ||||
|             s.getch | ||||
|           else | ||||
|             s.skip_until(/(?<!\\)'/) | ||||
|           end | ||||
|  | ||||
|         # Skip number literals | ||||
|         elsif s.scan(/(0x\h(\h|\.)*|\d(\d|\.)*)([uU][lL]{0,2}|([eE][-+]\d*)?[fFlL]*)/) | ||||
|  | ||||
|         # SGML style brackets | ||||
|         elsif token = s.scan(/<[^\s<>][^<>]*>/) | ||||
|           extract_sgml_tokens(token).each { |t| tokens << t } | ||||
|  | ||||
|         # Common programming punctuation | ||||
|         elsif token = s.scan(/;|\{|\}|\(|\)|\[|\]/) | ||||
|           tokens << token | ||||
|  | ||||
|         # Regular token | ||||
|         elsif token = s.scan(/[\w\.@#\/\*]+/) | ||||
|           tokens << token | ||||
|  | ||||
|         # Common operators | ||||
|         elsif token = s.scan(/<<?|\+|\-|\*|\/|%|&&?|\|\|?/) | ||||
|           tokens << token | ||||
|  | ||||
|         else | ||||
|           s.getch | ||||
|         end | ||||
|       end | ||||
|  | ||||
|       tokens | ||||
|     end | ||||
|  | ||||
|     # Internal: Extract normalized shebang command token. | ||||
|     # | ||||
|     # Examples | ||||
|     # | ||||
|     #   extract_shebang("#!/usr/bin/ruby") | ||||
|     #   # => "ruby" | ||||
|     # | ||||
|     #   extract_shebang("#!/usr/bin/env node") | ||||
|     #   # => "node" | ||||
|     # | ||||
|     #   extract_shebang("#!/usr/bin/env A=B foo=bar awk -f") | ||||
|     #   # => "awk" | ||||
|     # | ||||
|     # Returns String token or nil it couldn't be parsed. | ||||
|     def extract_shebang(data) | ||||
|       s = StringScanner.new(data) | ||||
|  | ||||
|       if path = s.scan(/^#!\s*\S+/) | ||||
|         script = path.split('/').last | ||||
|         if script == 'env' | ||||
|           s.scan(/\s+/) | ||||
|           s.scan(/.*=[^\s]+\s+/) | ||||
|           script = s.scan(/\S+/) | ||||
|         end | ||||
|         script = script[/[^\d]+/, 0] if script | ||||
|         return script | ||||
|       end | ||||
|  | ||||
|       nil | ||||
|     end | ||||
|  | ||||
|     # Internal: Extract tokens from inside SGML tag. | ||||
|     # | ||||
|     # data - SGML tag String. | ||||
|     # | ||||
|     # Examples | ||||
|     # | ||||
|     #   extract_sgml_tokens("<a href='' class=foo>") | ||||
|     #   # => ["<a>", "href="] | ||||
|     # | ||||
|     # Returns Array of token Strings. | ||||
|     def extract_sgml_tokens(data) | ||||
|       s = StringScanner.new(data) | ||||
|  | ||||
|       tokens = [] | ||||
|  | ||||
|       until s.eos? | ||||
|         # Emit start token | ||||
|         if token = s.scan(/<\/?[^\s>]+/) | ||||
|           tokens << "#{token}>" | ||||
|  | ||||
|         # Emit attributes with trailing = | ||||
|         elsif token = s.scan(/\w+=/) | ||||
|           tokens << token | ||||
|  | ||||
|           # Then skip over attribute value | ||||
|           if s.scan(/"/) | ||||
|             s.skip_until(/[^\\]"/) | ||||
|           elsif s.scan(/'/) | ||||
|             s.skip_until(/[^\\]'/) | ||||
|           else | ||||
|             s.skip_until(/\w+/) | ||||
|           end | ||||
|  | ||||
|         # Emit lone attributes | ||||
|         elsif token = s.scan(/\w+/) | ||||
|           tokens << token | ||||
|  | ||||
|         # Stop at the end of the tag | ||||
|         elsif s.scan(/>/) | ||||
|           s.terminate | ||||
|  | ||||
|         else | ||||
|           s.getch | ||||
|         end | ||||
|       end | ||||
|  | ||||
|       tokens | ||||
|     end | ||||
|   end | ||||
| end | ||||
|   | ||||
| @@ -19,9 +19,7 @@ | ||||
| - (^|/)dist/ | ||||
|  | ||||
| # C deps | ||||
| #  https://github.com/joyent/node | ||||
| - ^deps/ | ||||
| - ^tools/ | ||||
| - (^|/)configure$ | ||||
| - (^|/)config.guess$ | ||||
| - (^|/)config.sub$ | ||||
| @@ -65,6 +63,7 @@ | ||||
|  | ||||
| # Font Awesome | ||||
| - (^|/)font-awesome\.(css|less|scss|styl)$ | ||||
| - (^|/)font-awesome/.*\.(css|less|scss|styl)$ | ||||
|  | ||||
| # Foundation css | ||||
| - (^|/)foundation\.(css|less|scss|styl)$ | ||||
| @@ -242,10 +241,7 @@ | ||||
| - \.imageset/ | ||||
|  | ||||
| # Carthage | ||||
| - ^Carthage/ | ||||
|  | ||||
| # Cocoapods | ||||
| - ^Pods/ | ||||
| - (^|/)Carthage/ | ||||
|  | ||||
| # Sparkle | ||||
| - (^|/)Sparkle/ | ||||
|   | ||||
| @@ -1,3 +1,3 @@ | ||||
| module Linguist | ||||
|   VERSION = "5.3.2" | ||||
|   VERSION = "5.3.3" | ||||
| end | ||||
|   | ||||
| @@ -1,7 +0,0 @@ | ||||
| { | ||||
|   "repository": "https://github.com/github/linguist", | ||||
|   "dependencies": { | ||||
|     "season": "~>5.4" | ||||
|   }, | ||||
|   "license": "MIT" | ||||
| } | ||||
							
								
								
									
samples/Common Workflow Language/trunk-peak-score.cwl (36 lines, new file)
							| @@ -0,0 +1,36 @@ | ||||
| #!/usr/bin/env cwl-runner | ||||
| # Originally from | ||||
| # https://github.com/Duke-GCB/GGR-cwl/blob/54e897263a702ff1074c8ac814b4bf7205d140dd/utils/trunk-peak-score.cwl | ||||
| # Released under the MIT License: | ||||
| # https://github.com/Duke-GCB/GGR-cwl/blob/54e897263a702ff1074c8ac814b4bf7205d140dd/LICENSE | ||||
| # Converted to CWL v1.0 syntax using | ||||
| # https://github.com/common-workflow-language/cwl-upgrader | ||||
| # and polished by Michael R. Crusoe <mrc@commonwl.org> | ||||
| # All modifications also released under the MIT License | ||||
| cwlVersion: v1.0 | ||||
| class: CommandLineTool | ||||
| doc: Trunk scores in ENCODE bed6+4 files | ||||
|  | ||||
| hints: | ||||
|   DockerRequirement: | ||||
|     dockerPull: dukegcb/workflow-utils | ||||
|  | ||||
| inputs: | ||||
|   peaks: | ||||
|     type: File | ||||
|   sep: | ||||
|     type: string | ||||
|     default: \t | ||||
|  | ||||
| outputs: | ||||
|   trunked_scores_peaks: | ||||
|     type: stdout | ||||
|  | ||||
| baseCommand: awk | ||||
|  | ||||
| arguments: | ||||
| - -F $(inputs.sep) | ||||
| - BEGIN{OFS=FS}$5>1000{$5=1000}{print} | ||||
| - $(inputs.peaks.path) | ||||
|  | ||||
| stdout: $(inputs.peaks.nameroot).trunked_scores$(inputs.peaks.nameext) | ||||
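This sample wraps a one-line awk program that caps the score column of ENCODE bed6+4 rows at 1000. A hedged Ruby rendering of the same transformation (the input row is hypothetical):

```ruby
# awk equivalent from the sample: BEGIN{OFS=FS} $5>1000{$5=1000} {print}
line = "chr1\t100\t200\tpeak_1\t1500\t."
fields = line.split("\t")
fields[4] = "1000" if fields[4].to_i > 1000
fields.join("\t")  # => "chr1\t100\t200\tpeak_1\t1000\t."
```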
							
								
								
									
samples/PostCSS/sample.pcss (13 lines, new file)
							| @@ -0,0 +1,13 @@ | ||||
| @define-mixin size $size { | ||||
|   width: $size; | ||||
| } | ||||
|  | ||||
| $big: 100px; | ||||
|  | ||||
| /* Main block */ | ||||
| .block { | ||||
|   &_logo { | ||||
|     background: inline("./logo.png"); | ||||
|     @mixin size $big; | ||||
|   } | ||||
| } | ||||
							
								
								
									
samples/SugarSS/sample.sss (10 lines, new file)
							| @@ -0,0 +1,10 @@ | ||||
| @define-mixin size $size | ||||
|   width: $size | ||||
|  | ||||
| $big: 100px | ||||
|  | ||||
| // Main block | ||||
| .block | ||||
|   &_logo | ||||
|     background: inline("./logo.png") | ||||
|     @mixin size $big | ||||
samples/YARA/OfExample.yar (23 lines, Normal file)
							| @@ -0,0 +1,23 @@ | ||||
| rule OfExample2 | ||||
| { | ||||
|     strings: | ||||
|         $foo1 = "foo1" | ||||
|         $foo2 = "foo2" | ||||
|         $foo3 = "foo3" | ||||
|  | ||||
|     condition: | ||||
|         2 of ($foo*)  // equivalent to 2 of ($foo1,$foo2,$foo3) | ||||
| } | ||||
|  | ||||
| rule OfExample3 | ||||
| { | ||||
|     strings: | ||||
|         $foo1 = "foo1" | ||||
|         $foo2 = "foo2" | ||||
|  | ||||
|         $bar1 = "bar1" | ||||
|         $bar2 = "bar2" | ||||
|  | ||||
|     condition: | ||||
|         3 of ($foo*,$bar1,$bar2) | ||||
| } | ||||
samples/YARA/example.yara (13 lines, Normal file)
							| @@ -0,0 +1,13 @@ | ||||
| rule silent_banker : banker | ||||
| { | ||||
|     meta: | ||||
|         description = "This is just an example" | ||||
|         thread_level = 3 | ||||
|         in_the_wild = true | ||||
|     strings: | ||||
|         $a = {6A 40 68 00 30 00 00 6A 14 8D 91} | ||||
|         $b = {8D 4D B0 2B C1 83 C0 27 99 6A 4E 59 F7 F9} | ||||
|         $c = "UVODFRYSIHLNWPEJXQZAKCBGMT" | ||||
|     condition: | ||||
|         $a or $b or $c | ||||
| } | ||||
samples/YARA/true.yar (1 line, Normal file)
							| @@ -0,0 +1 @@ | ||||
| rule test { condition: true } | ||||
| @@ -1,6 +1,7 @@ | ||||
| #!/usr/bin/env ruby | ||||
|  | ||||
| require "optparse" | ||||
| require "open3" | ||||
|  | ||||
| ROOT = File.expand_path("../../", __FILE__) | ||||
|  | ||||
| @@ -42,6 +43,17 @@ def log(msg) | ||||
|   puts msg if $verbose | ||||
| end | ||||
|  | ||||
| def command(*args) | ||||
|   log "$ #{args.join(' ')}" | ||||
|   output, status = Open3.capture2e(*args) | ||||
|   if !status.success? | ||||
|     output.each_line do |line| | ||||
|       log "  > #{line}" | ||||
|     end | ||||
|     warn "Command failed. Aborting." | ||||
|     exit 1 | ||||
|   end | ||||
| end | ||||
|  | ||||
| usage = """Usage: | ||||
|   #{$0} [-v|--verbose] [--replace grammar] url | ||||
| @@ -51,12 +63,12 @@ Examples: | ||||
| """ | ||||
|  | ||||
| $replace = nil | ||||
| $verbose = false | ||||
| $verbose = true | ||||
|  | ||||
| OptionParser.new do |opts| | ||||
|   opts.banner = usage | ||||
|   opts.on("-v", "--verbose", "Print verbose feedback to STDOUT") do | ||||
|     $verbose = true | ||||
|   opts.on("-q", "--quiet", "Do not print output unless there's a failure") do | ||||
|     $verbose = false | ||||
|   end | ||||
|   opts.on("-rSUBMODULE", "--replace=SUBMODDULE", "Replace an existing grammar submodule.") do |name| | ||||
|     $replace = name | ||||
| @@ -82,23 +94,22 @@ Dir.chdir(ROOT) | ||||
|  | ||||
| if repo_old | ||||
|   log "Deregistering: #{repo_old}" | ||||
|   `git submodule deinit #{repo_old}` | ||||
|   `git rm -rf #{repo_old}` | ||||
|   `script/convert-grammars` | ||||
|   command('git', 'submodule', 'deinit', repo_old) | ||||
|   command('git', 'rm', '-rf', repo_old) | ||||
|   command('script/grammar-compiler', 'update', '-f') | ||||
| end | ||||
|  | ||||
| log "Registering new submodule: #{repo_new}" | ||||
| `git submodule add -f #{https} #{repo_new}` | ||||
| exit 1 if $?.exitstatus > 0 | ||||
| `script/convert-grammars --add #{repo_new}` | ||||
| command('git', 'submodule', 'add', '-f', https, repo_new) | ||||
| command('script/grammar-compiler', 'add', repo_new) | ||||
|  | ||||
| log "Confirming license" | ||||
| if repo_old | ||||
|   `script/licensed` | ||||
|   command('script/licensed') | ||||
| else | ||||
|   `script/licensed --module "#{repo_new}"` | ||||
|   command('script/licensed', '--module', repo_new) | ||||
| end | ||||
|  | ||||
| log "Updating grammar documentation in vendor/README.md" | ||||
| `bundle exec rake samples` | ||||
| `script/list-grammars` | ||||
| command('bundle', 'exec', 'rake', 'samples') | ||||
| command('script/list-grammars') | ||||
|   | ||||
| @@ -1,319 +0,0 @@ | ||||
| #!/usr/bin/env ruby | ||||
|  | ||||
| require 'bundler/setup' | ||||
| require 'json' | ||||
| require 'net/http' | ||||
| require 'optparse' | ||||
| require 'plist' | ||||
| require 'set' | ||||
| require 'thread' | ||||
| require 'tmpdir' | ||||
| require 'uri' | ||||
| require 'yaml' | ||||
|  | ||||
| ROOT = File.expand_path("../..", __FILE__) | ||||
| GRAMMARS_PATH = File.join(ROOT, "grammars") | ||||
| SOURCES_FILE = File.join(ROOT, "grammars.yml") | ||||
| CSONC = File.join(ROOT, "node_modules", ".bin", "csonc") | ||||
|  | ||||
| $options = { | ||||
|   :add => false, | ||||
|   :install => true, | ||||
|   :output => SOURCES_FILE, | ||||
|   :remote => true, | ||||
| } | ||||
|  | ||||
| class SingleFile | ||||
|   def initialize(path) | ||||
|     @path = path | ||||
|   end | ||||
|  | ||||
|   def url | ||||
|     @path | ||||
|   end | ||||
|  | ||||
|   def fetch(tmp_dir) | ||||
|     [@path] | ||||
|   end | ||||
| end | ||||
|  | ||||
| class DirectoryPackage | ||||
|   def self.fetch(dir) | ||||
|     Dir["#{dir}/**/*"].select do |path| | ||||
|       case File.extname(path.downcase) | ||||
|       when '.plist' | ||||
|         path.split('/')[-2] == 'Syntaxes' | ||||
|       when '.tmlanguage', '.yaml-tmlanguage' | ||||
|         true | ||||
|       when '.cson', '.json' | ||||
|         path.split('/')[-2] == 'grammars' | ||||
|       else | ||||
|         false | ||||
|       end | ||||
|     end | ||||
|   end | ||||
|  | ||||
|   def initialize(directory) | ||||
|     @directory = directory | ||||
|   end | ||||
|  | ||||
|   def url | ||||
|     @directory | ||||
|   end | ||||
|  | ||||
|   def fetch(tmp_dir) | ||||
|     self.class.fetch(File.join(ROOT, @directory)) | ||||
|   end | ||||
| end | ||||
|  | ||||
| class TarballPackage | ||||
|   def self.fetch(tmp_dir, url) | ||||
|     `curl --silent --location --max-time 30 --output "#{tmp_dir}/archive" "#{url}"` | ||||
|     raise "Failed to fetch GH package: #{url} #{$?.to_s}" unless $?.success? | ||||
|  | ||||
|     output = File.join(tmp_dir, 'extracted') | ||||
|     Dir.mkdir(output) | ||||
|     `tar -C "#{output}" -xf "#{tmp_dir}/archive"` | ||||
|     raise "Failed to uncompress tarball: #{tmp_dir}/archive (from #{url}) #{$?.to_s}" unless $?.success? | ||||
|  | ||||
|     DirectoryPackage.fetch(output) | ||||
|   end | ||||
|  | ||||
|   attr_reader :url | ||||
|  | ||||
|   def initialize(url) | ||||
|     @url = url | ||||
|   end | ||||
|  | ||||
|   def fetch(tmp_dir) | ||||
|     self.class.fetch(tmp_dir, url) | ||||
|   end | ||||
| end | ||||
|  | ||||
| class SingleGrammar | ||||
|   attr_reader :url | ||||
|  | ||||
|   def initialize(url) | ||||
|     @url = url | ||||
|   end | ||||
|  | ||||
|   def fetch(tmp_dir) | ||||
|     filename = File.join(tmp_dir, File.basename(url)) | ||||
|     `curl --silent --location --max-time 10 --output "#{filename}" "#{url}"` | ||||
|     raise "Failed to fetch grammar: #{url}: #{$?.to_s}" unless $?.success? | ||||
|     [filename] | ||||
|   end | ||||
| end | ||||
|  | ||||
| class SVNPackage | ||||
|   attr_reader :url | ||||
|  | ||||
|   def initialize(url) | ||||
|     @url = url | ||||
|   end | ||||
|  | ||||
|   def fetch(tmp_dir) | ||||
|     `svn export -q "#{url}/Syntaxes" "#{tmp_dir}/Syntaxes"` | ||||
|     raise "Failed to export SVN repository: #{url}: #{$?.to_s}" unless $?.success? | ||||
|     Dir["#{tmp_dir}/Syntaxes/*.{plist,tmLanguage,tmlanguage,YAML-tmLanguage}"] | ||||
|   end | ||||
| end | ||||
|  | ||||
| class GitHubPackage | ||||
|   def self.parse_url(url) | ||||
|     url, ref = url.split("@", 2) | ||||
|     path = URI.parse(url).path.split('/') | ||||
|     [path[1], path[2].chomp('.git'), ref || "master"] | ||||
|   end | ||||
|  | ||||
|   attr_reader :user | ||||
|   attr_reader :repo | ||||
|   attr_reader :ref | ||||
|  | ||||
|   def initialize(url) | ||||
|     @user, @repo, @ref = self.class.parse_url(url) | ||||
|   end | ||||
|  | ||||
|   def url | ||||
|     suffix = "@#{ref}" unless ref == "master" | ||||
|     "https://github.com/#{user}/#{repo}#{suffix}" | ||||
|   end | ||||
|  | ||||
|   def fetch(tmp_dir) | ||||
|     url = "https://github.com/#{user}/#{repo}/archive/#{ref}.tar.gz" | ||||
|     TarballPackage.fetch(tmp_dir, url) | ||||
|   end | ||||
| end | ||||
|  | ||||
| def load_grammar(path) | ||||
|   case File.extname(path.downcase) | ||||
|   when '.plist', '.tmlanguage' | ||||
|     Plist::parse_xml(path) | ||||
|   when '.yaml-tmlanguage' | ||||
|     content = File.read(path) | ||||
|     # Attempt to parse YAML file even if it has a YAML 1.2 header | ||||
|     if content.lines[0] =~ /^%YAML[ :]1\.2/ | ||||
|       content = content.lines[1..-1].join | ||||
|     end | ||||
|     begin | ||||
|       YAML.load(content) | ||||
|     rescue Psych::SyntaxError => e | ||||
|       $stderr.puts "Failed to parse YAML grammar '#{path}'" | ||||
|     end | ||||
|   when '.cson' | ||||
|     cson = `"#{CSONC}" "#{path}"` | ||||
|     raise "Failed to convert CSON grammar '#{path}': #{$?.to_s}" unless $?.success? | ||||
|     JSON.parse(cson) | ||||
|   when '.json' | ||||
|     JSON.parse(File.read(path)) | ||||
|   else | ||||
|     raise "Invalid document type #{path}" | ||||
|   end | ||||
| end | ||||
|  | ||||
| def load_grammars(tmp_dir, source, all_scopes) | ||||
|   is_url = source.start_with?("http:", "https:") | ||||
|   return [] if is_url && !$options[:remote] | ||||
|   return [] if !is_url && !File.exist?(source) | ||||
|  | ||||
|   p = if !is_url | ||||
|         if File.directory?(source) | ||||
|           DirectoryPackage.new(source) | ||||
|         else | ||||
|           SingleFile.new(source) | ||||
|         end | ||||
|       elsif source.end_with?('.tmLanguage', '.plist', '.YAML-tmLanguage') | ||||
|         SingleGrammar.new(source) | ||||
|       elsif source.start_with?('https://github.com') | ||||
|         GitHubPackage.new(source) | ||||
|       elsif source.start_with?('http://svn.textmate.org') | ||||
|         SVNPackage.new(source) | ||||
|       elsif source.end_with?('.tar.gz') | ||||
|         TarballPackage.new(source) | ||||
|       else | ||||
|         nil | ||||
|       end | ||||
|  | ||||
|   raise "Unsupported source: #{source}" unless p | ||||
|  | ||||
|   p.fetch(tmp_dir).map do |path| | ||||
|     grammar = load_grammar(path) | ||||
|     scope = grammar['scopeName'] || grammar['scope'] | ||||
|  | ||||
|     if all_scopes.key?(scope) | ||||
|       unless all_scopes[scope] == p.url | ||||
|         $stderr.puts "WARN: Duplicated scope #{scope}\n" + | ||||
|           "  Current package: #{p.url}\n" + | ||||
|           "  Previous package: #{all_scopes[scope]}" | ||||
|       end | ||||
|       next | ||||
|     end | ||||
|     all_scopes[scope] = p.url | ||||
|     grammar | ||||
|   end.compact | ||||
| end | ||||
|  | ||||
| def install_grammars(grammars, path) | ||||
|   installed = [] | ||||
|  | ||||
|   grammars.each do |grammar| | ||||
|     scope = grammar['scopeName'] || grammar['scope'] | ||||
|     File.write(File.join(GRAMMARS_PATH, "#{scope}.json"), JSON.pretty_generate(grammar)) | ||||
|     installed << scope | ||||
|   end | ||||
|  | ||||
|   $stderr.puts("OK #{path} (#{installed.join(', ')})") | ||||
| end | ||||
|  | ||||
| def run_thread(queue, all_scopes) | ||||
|   Dir.mktmpdir do |tmpdir| | ||||
|     loop do | ||||
|       source, index = begin | ||||
|         queue.pop(true) | ||||
|       rescue ThreadError | ||||
|         # The queue is empty. | ||||
|         break | ||||
|       end | ||||
|  | ||||
|       dir = "#{tmpdir}/#{index}" | ||||
|       Dir.mkdir(dir) | ||||
|  | ||||
|       grammars = load_grammars(dir, source, all_scopes) | ||||
|       install_grammars(grammars, source) if $options[:install] | ||||
|     end | ||||
|   end | ||||
| end | ||||
|  | ||||
| def generate_yaml(all_scopes, base) | ||||
|   yaml = all_scopes.each_with_object(base) do |(key,value),out| | ||||
|     out[value] ||= [] | ||||
|     out[value] << key | ||||
|   end | ||||
|  | ||||
|   yaml = Hash[yaml.sort] | ||||
|   yaml.each { |k, v| v.sort! } | ||||
|   yaml | ||||
| end | ||||
|  | ||||
| def main(sources) | ||||
|   begin | ||||
|     Dir.mkdir(GRAMMARS_PATH) | ||||
|   rescue Errno::EEXIST | ||||
|   end | ||||
|  | ||||
|   `npm install` | ||||
|  | ||||
|   all_scopes = {} | ||||
|  | ||||
|   if source = $options[:add] | ||||
|     Dir.mktmpdir do |tmpdir| | ||||
|       grammars = load_grammars(tmpdir, source, all_scopes) | ||||
|       install_grammars(grammars, source) if $options[:install] | ||||
|     end | ||||
|     generate_yaml(all_scopes, sources) | ||||
|   else | ||||
|     queue = Queue.new | ||||
|  | ||||
|     sources.each do |url, scopes| | ||||
|       queue.push([url, queue.length]) | ||||
|     end | ||||
|  | ||||
|     threads = 8.times.map do | ||||
|       Thread.new { run_thread(queue, all_scopes) } | ||||
|     end | ||||
|     threads.each(&:join) | ||||
|     generate_yaml(all_scopes, {}) | ||||
|   end | ||||
| end | ||||
|  | ||||
| OptionParser.new do |opts| | ||||
|   opts.banner = "Usage: #{$0} [options]" | ||||
|  | ||||
|   opts.on("--add GRAMMAR", "Add a new grammar. GRAMMAR may be a file path or URL.") do |a| | ||||
|     $options[:add] = a | ||||
|   end | ||||
|  | ||||
|   opts.on("--[no-]install", "Install grammars into grammars/ directory.") do |i| | ||||
|     $options[:install] = i | ||||
|   end | ||||
|  | ||||
|   opts.on("--output FILE", "Write output to FILE. Use - for stdout.") do |o| | ||||
|     $options[:output] = o == "-" ? $stdout : o | ||||
|   end | ||||
|  | ||||
|   opts.on("--[no-]remote", "Download remote grammars.") do |r| | ||||
|     $options[:remote] = r | ||||
|   end | ||||
| end.parse! | ||||
|  | ||||
| sources = File.open(SOURCES_FILE) do |file| | ||||
|   YAML.load(file) | ||||
| end | ||||
|  | ||||
| yaml = main(sources) | ||||
|  | ||||
| if $options[:output].is_a?(IO) | ||||
|   $options[:output].write(YAML.dump(yaml)) | ||||
| else | ||||
|   File.write($options[:output], YAML.dump(yaml)) | ||||
| end | ||||
script/grammar-compiler (12 lines, Executable file)
							| @@ -0,0 +1,12 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| set -e | ||||
| cd "$(dirname "$0")/.." | ||||
|  | ||||
| image="linguist/grammar-compiler:latest" | ||||
| mkdir -p grammars | ||||
|  | ||||
| exec docker run --rm \ | ||||
|     -u $(id -u $USER):$(id -g $USER) \ | ||||
|     -v $PWD:/src/linguist \ | ||||
|     -w /src/linguist $image "$@" | ||||
| @@ -1,60 +0,0 @@ | ||||
| #!/usr/bin/env ruby | ||||
|  | ||||
| require "bundler/setup" | ||||
| require "json" | ||||
| require "linguist" | ||||
| require "set" | ||||
| require "yaml" | ||||
|  | ||||
| ROOT = File.expand_path("../../", __FILE__) | ||||
|  | ||||
| def find_includes(json) | ||||
|   case json | ||||
|   when Hash | ||||
|     result = [] | ||||
|     if inc = json["include"] | ||||
|       result << inc.split("#", 2).first unless inc.start_with?("#", "$") | ||||
|     end | ||||
|     result + json.values.flat_map { |v| find_includes(v) } | ||||
|   when Array | ||||
|     json.flat_map { |v| find_includes(v) } | ||||
|   else | ||||
|     [] | ||||
|   end | ||||
| end | ||||
|  | ||||
| def transitive_includes(scope, includes) | ||||
|   scopes = Set.new | ||||
|   queue = includes[scope] || [] | ||||
|   while s = queue.shift | ||||
|     next if scopes.include?(s) | ||||
|     scopes << s | ||||
|     queue += includes[s] || [] | ||||
|   end | ||||
|   scopes | ||||
| end | ||||
|  | ||||
| includes = {} | ||||
| Dir[File.join(ROOT, "grammars/*.json")].each do |path| | ||||
|   scope = File.basename(path).sub(/\.json/, '') | ||||
|   json = JSON.load(File.read(path)) | ||||
|   incs = find_includes(json) | ||||
|   next if incs.empty? | ||||
|   includes[scope] ||= [] | ||||
|   includes[scope] += incs | ||||
| end | ||||
|  | ||||
| yaml = YAML.load(File.read(File.join(ROOT, "grammars.yml"))) | ||||
| language_scopes = Linguist::Language.all.map(&:tm_scope).to_set | ||||
|  | ||||
| # The set of used scopes is the scopes for each language, plus all the scopes | ||||
| # they include, transitively. | ||||
| used_scopes = language_scopes + language_scopes.flat_map { |s| transitive_includes(s, includes).to_a }.to_set | ||||
|  | ||||
| unused = yaml.reject { |repo, scopes| scopes.any? { |scope| used_scopes.include?(scope) } } | ||||
|  | ||||
| puts "Unused grammar repos" | ||||
| puts unused.map { |repo, scopes| sprintf("%-100s %s", repo, scopes.join(", ")) }.sort.join("\n") | ||||
|  | ||||
| yaml.delete_if { |k| unused.key?(k) } | ||||
| File.write(File.join(ROOT, "grammars.yml"), YAML.dump(yaml)) | ||||
| @@ -188,6 +188,17 @@ class TestFileBlob < Minitest::Test | ||||
|     assert fixture_blob("Binary/MainMenu.nib").generated? | ||||
|     assert !sample_blob("XML/project.pbxproj").generated? | ||||
|  | ||||
|     # Cocoapods | ||||
|     assert sample_blob('Pods/blah').generated? | ||||
|     assert !sample_blob('My-Pods/blah').generated? | ||||
|  | ||||
|     # Carthage | ||||
|     assert sample_blob('Carthage/Build/blah').generated? | ||||
|     assert !sample_blob('Carthage/blah').generated? | ||||
|     assert !sample_blob('Carthage/Checkout/blah').generated? | ||||
|     assert !sample_blob('My-Carthage/Build/blah').generated? | ||||
|  | ||||
|     # Gemfile.lock is NOT generated | ||||
|     assert !sample_blob("Gemfile.lock").generated? | ||||
|  | ||||
| @@ -313,8 +324,6 @@ class TestFileBlob < Minitest::Test | ||||
|     assert sample_blob("deps/http_parser/http_parser.c").vendored? | ||||
|     assert sample_blob("deps/v8/src/v8.h").vendored? | ||||
|  | ||||
|     assert sample_blob("tools/something/else.c").vendored? | ||||
|  | ||||
|     # Chart.js | ||||
|     assert sample_blob("some/vendored/path/Chart.js").vendored? | ||||
|     assert !sample_blob("some/vendored/path/chart.js").vendored? | ||||
| @@ -490,9 +499,9 @@ class TestFileBlob < Minitest::Test | ||||
|  | ||||
|     # Carthage | ||||
|     assert sample_blob('Carthage/blah').vendored? | ||||
|  | ||||
|     # Cocoapods | ||||
|     assert sample_blob('Pods/blah').vendored? | ||||
|     assert sample_blob('iOS/Carthage/blah').vendored? | ||||
|     assert !sample_blob('My-Carthage/blah').vendored? | ||||
|     assert !sample_blob('iOS/My-Carthage/blah').vendored? | ||||
|  | ||||
|     # Html5shiv | ||||
|     assert sample_blob("Scripts/html5shiv.js").vendored? | ||||
|   | ||||
| @@ -42,6 +42,24 @@ class TestGenerated < Minitest::Test | ||||
|     generated_sample_without_loading_data("Dummy/foo.xcworkspacedata") | ||||
|     generated_sample_without_loading_data("Dummy/foo.xcuserstate") | ||||
|  | ||||
|     # Cocoapods | ||||
|     generated_sample_without_loading_data("Pods/Pods.xcodeproj") | ||||
|     generated_sample_without_loading_data("Pods/SwiftDependency/foo.swift") | ||||
|     generated_sample_without_loading_data("Pods/ObjCDependency/foo.h") | ||||
|     generated_sample_without_loading_data("Pods/ObjCDependency/foo.m") | ||||
|     generated_sample_without_loading_data("Dummy/Pods/Pods.xcodeproj") | ||||
|     generated_sample_without_loading_data("Dummy/Pods/SwiftDependency/foo.swift") | ||||
|     generated_sample_without_loading_data("Dummy/Pods/ObjCDependency/foo.h") | ||||
|     generated_sample_without_loading_data("Dummy/Pods/ObjCDependency/foo.m") | ||||
|  | ||||
|     # Carthage | ||||
|     generated_sample_without_loading_data("Carthage/Build/.Dependency.version") | ||||
|     generated_sample_without_loading_data("Carthage/Build/iOS/Dependency.framework") | ||||
|     generated_sample_without_loading_data("Carthage/Build/Mac/Dependency.framework") | ||||
|     generated_sample_without_loading_data("src/Carthage/Build/.Dependency.version") | ||||
|     generated_sample_without_loading_data("src/Carthage/Build/iOS/Dependency.framework") | ||||
|     generated_sample_without_loading_data("src/Carthage/Build/Mac/Dependency.framework") | ||||
|  | ||||
|     # Go-specific vendored paths | ||||
|     generated_sample_without_loading_data("go/vendor/github.com/foo.go") | ||||
|     generated_sample_without_loading_data("go/vendor/golang.org/src/foo.c") | ||||
|   | ||||
| @@ -94,19 +94,6 @@ class TestGrammars < Minitest::Test | ||||
|     assert nonexistent_submodules.empty? && unlisted_submodules.empty?, message.sub(/\.\Z/, "") | ||||
|   end | ||||
|  | ||||
|   def test_local_scopes_are_in_sync | ||||
|     actual = YAML.load(`"#{File.join(ROOT, "script", "convert-grammars")}" --output - --no-install --no-remote`) | ||||
|     assert $?.success?, "script/convert-grammars failed" | ||||
|  | ||||
|     # We're not checking remote grammars. That can take a long time and make CI | ||||
|     # flaky if network conditions are poor. | ||||
|     @grammars.delete_if { |k, v| k.start_with?("http:", "https:") } | ||||
|  | ||||
|     @grammars.each do |k, v| | ||||
|       assert_equal v, actual[k], "The scopes listed for #{k} in grammars.yml don't match the scopes found in that repository" | ||||
|     end | ||||
|   end | ||||
|  | ||||
|   def test_readme_file_is_in_sync | ||||
|     current_data = File.read("#{ROOT}/vendor/README.md").to_s.sub(/\A.+?<!--.+?-->\n/ms, "") | ||||
|     updated_data = `script/list-grammars --print` | ||||
|   | ||||
| @@ -470,5 +470,7 @@ class TestLanguage < Minitest::Test | ||||
|  | ||||
|   def test_non_crash_on_comma | ||||
|     assert_nil Language[','] | ||||
|     assert_nil Language.find_by_name(',') | ||||
|     assert_nil Language.find_by_alias(',') | ||||
|   end | ||||
| end | ||||
|   | ||||
tools/grammars/.gitignore (1 line, vendored, Normal file)
							| @@ -0,0 +1 @@ | ||||
| /vendor | ||||
tools/grammars/Dockerfile (35 lines, Normal file)
							| @@ -0,0 +1,35 @@ | ||||
| FROM golang:1.9.2 | ||||
|  | ||||
| RUN apt-get update | ||||
| RUN apt-get upgrade -y | ||||
| RUN apt-get install -y curl gnupg | ||||
|  | ||||
| RUN curl -sL https://deb.nodesource.com/setup_6.x | bash - | ||||
| RUN apt-get install -y nodejs | ||||
| RUN npm install -g season | ||||
|  | ||||
| RUN apt-get install -y cmake | ||||
| RUN cd /tmp && git clone https://github.com/vmg/pcre | ||||
| RUN mkdir -p /tmp/pcre/build && cd /tmp/pcre/build && \ | ||||
| 	cmake .. \ | ||||
| 		-DPCRE_SUPPORT_JIT=ON \ | ||||
| 		-DPCRE_SUPPORT_UTF=ON \ | ||||
| 		-DPCRE_SUPPORT_UNICODE_PROPERTIES=ON \ | ||||
| 		-DBUILD_SHARED_LIBS=OFF \ | ||||
| 		-DCMAKE_C_FLAGS="-fPIC" \ ||||
| 		-DCMAKE_BUILD_TYPE=RelWithDebInfo \ | ||||
| 		-DPCRE_BUILD_PCRECPP=OFF \ | ||||
| 		-DPCRE_BUILD_PCREGREP=OFF \ | ||||
| 		-DPCRE_BUILD_TESTS=OFF \ | ||||
| 		-G "Unix Makefiles" && \ | ||||
|     make && make install | ||||
| RUN rm -rf /tmp/pcre | ||||
|  | ||||
| RUN go get -u github.com/golang/dep/cmd/dep | ||||
| WORKDIR /go/src/github.com/github/linguist/tools/grammars | ||||
| COPY . . | ||||
|  | ||||
| RUN dep ensure | ||||
| RUN go install ./cmd/grammar-compiler | ||||
|  | ||||
| ENTRYPOINT ["grammar-compiler"] | ||||
tools/grammars/Gopkg.lock (51 lines, generated, Normal file)
							| @@ -0,0 +1,51 @@ | ||||
| # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. | ||||
|  | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/golang/protobuf" | ||||
|   packages = ["proto"] | ||||
|   revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/groob/plist" | ||||
|   packages = ["."] | ||||
|   revision = "7b367e0aa692e62a223e823f3288c0c00f519a36" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/mattn/go-runewidth" | ||||
|   packages = ["."] | ||||
|   revision = "9e777a8366cce605130a531d2cd6363d07ad7317" | ||||
|   version = "v0.0.2" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/mitchellh/mapstructure" | ||||
|   packages = ["."] | ||||
|   revision = "06020f85339e21b2478f756a78e295255ffa4d6a" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/urfave/cli" | ||||
|   packages = ["."] | ||||
|   revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1" | ||||
|   version = "v1.20.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "gopkg.in/cheggaaa/pb.v1" | ||||
|   packages = ["."] | ||||
|   revision = "657164d0228d6bebe316fdf725c69f131a50fb10" | ||||
|   version = "v1.0.18" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "v2" | ||||
|   name = "gopkg.in/yaml.v2" | ||||
|   packages = ["."] | ||||
|   revision = "287cf08546ab5e7e37d55a84f7ed3fd1db036de5" | ||||
|  | ||||
| [solve-meta] | ||||
|   analyzer-name = "dep" | ||||
|   analyzer-version = 1 | ||||
|   inputs-digest = "ba2e3150d728692b49e3e2d652b6ea23db82777c340e0c432cd4af6f0eef9f55" | ||||
|   solver-name = "gps-cdcl" | ||||
|   solver-version = 1 | ||||
tools/grammars/Gopkg.toml (23 lines, Normal file)
							| @@ -0,0 +1,23 @@ | ||||
| [[constraint]] | ||||
|   branch = "v2" | ||||
|   name = "gopkg.in/yaml.v2" | ||||
|  | ||||
| [[constraint]] | ||||
|   branch = "master" | ||||
|   name = "github.com/groob/plist" | ||||
|  | ||||
| [[constraint]] | ||||
|   branch = "master" | ||||
|   name = "github.com/golang/protobuf" | ||||
|  | ||||
| [[constraint]] | ||||
|   branch = "master" | ||||
|   name = "github.com/mitchellh/mapstructure" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "gopkg.in/cheggaaa/pb.v1" | ||||
|   version = "1.0.18" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "github.com/urfave/cli" | ||||
|   version = "1.20.0" | ||||
tools/grammars/cmd/grammar-compiler/main.go (120 lines, Normal file)
							| @@ -0,0 +1,120 @@ | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
|  | ||||
| 	"github.com/github/linguist/tools/grammars/compiler" | ||||
| 	"github.com/urfave/cli" | ||||
| ) | ||||
|  | ||||
| func cwd() string { | ||||
| 	cwd, _ := os.Getwd() | ||||
| 	return cwd | ||||
| } | ||||
|  | ||||
| func wrap(err error) error { | ||||
| 	return cli.NewExitError(err, 255) | ||||
| } | ||||
|  | ||||
| func main() { | ||||
| 	app := cli.NewApp() | ||||
| 	app.Name = "Linguist Grammars Compiler" | ||||
| 	app.Usage = "Compile user-submitted grammars and check them for errors" | ||||
|  | ||||
| 	app.Flags = []cli.Flag{ | ||||
| 		cli.StringFlag{ | ||||
| 			Name:  "linguist-path", | ||||
| 			Value: cwd(), | ||||
| 			Usage: "path to Linguist root", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	app.Commands = []cli.Command{ | ||||
| 		{ | ||||
| 			Name:  "add", | ||||
| 			Usage: "add a new grammar source", | ||||
| 			Flags: []cli.Flag{ | ||||
| 				cli.BoolFlag{ | ||||
| 					Name:  "force, f", | ||||
| 					Usage: "ignore compilation errors", | ||||
| 				}, | ||||
| 			}, | ||||
| 			Action: func(c *cli.Context) error { | ||||
| 				conv, err := compiler.NewConverter(c.String("linguist-path")) | ||||
| 				if err != nil { | ||||
| 					return wrap(err) | ||||
| 				} | ||||
| 				if err := conv.AddGrammar(c.Args().First()); err != nil { | ||||
| 					if !c.Bool("force") { | ||||
| 						return wrap(err) | ||||
| 					} | ||||
| 				} | ||||
| 				if err := conv.WriteGrammarList(); err != nil { | ||||
| 					return wrap(err) | ||||
| 				} | ||||
| 				return nil | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Name:  "update", | ||||
| 			Usage: "update grammars.yml with the contents of the grammars library", | ||||
| 			Flags: []cli.Flag{ | ||||
| 				cli.BoolFlag{ | ||||
| 					Name:  "force, f", | ||||
| 					Usage: "write grammars.yml even if grammars fail to compile", | ||||
| 				}, | ||||
| 			}, | ||||
| 			Action: func(c *cli.Context) error { | ||||
| 				conv, err := compiler.NewConverter(c.String("linguist-path")) | ||||
| 				if err != nil { | ||||
| 					return wrap(err) | ||||
| 				} | ||||
| 				if err := conv.ConvertGrammars(true); err != nil { | ||||
| 					return wrap(err) | ||||
| 				} | ||||
| 				if err := conv.Report(); err != nil { | ||||
| 					if !c.Bool("force") { | ||||
| 						return wrap(err) | ||||
| 					} | ||||
| 				} | ||||
| 				if err := conv.WriteGrammarList(); err != nil { | ||||
| 					return wrap(err) | ||||
| 				} | ||||
| 				return nil | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Name:  "compile", | ||||
| 			Usage: "convert the grammars from the library", | ||||
| 			Flags: []cli.Flag{ | ||||
| 				cli.StringFlag{Name: "proto-out, P"}, | ||||
| 				cli.StringFlag{Name: "out, o"}, | ||||
| 			}, | ||||
| 			Action: func(c *cli.Context) error { | ||||
| 				conv, err := compiler.NewConverter(c.String("linguist-path")) | ||||
| 				if err != nil { | ||||
| 					return cli.NewExitError(err, 1) | ||||
| 				} | ||||
| 				if err := conv.ConvertGrammars(false); err != nil { | ||||
| 					return cli.NewExitError(err, 1) | ||||
| 				} | ||||
| 				if out := c.String("proto-out"); out != "" { | ||||
| 					if err := conv.WriteProto(out); err != nil { | ||||
| 						return cli.NewExitError(err, 1) | ||||
| 					} | ||||
| 				} | ||||
| 				if out := c.String("out"); out != "" { | ||||
| 					if err := conv.WriteJSON(out); err != nil { | ||||
| 						return cli.NewExitError(err, 1) | ||||
| 					} | ||||
| 				} | ||||
| 				if err := conv.Report(); err != nil { | ||||
| 					return wrap(err) | ||||
| 				} | ||||
| 				return nil | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	app.Run(os.Args) | ||||
| } | ||||
tools/grammars/compiler/converter.go (261 lines, Normal file)
							| @@ -0,0 +1,261 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path" | ||||
| 	"runtime" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	grammar "github.com/github/linguist/tools/grammars/proto" | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	pb "gopkg.in/cheggaaa/pb.v1" | ||||
| 	yaml "gopkg.in/yaml.v2" | ||||
| ) | ||||
|  | ||||
| type Converter struct { | ||||
| 	root string | ||||
|  | ||||
| 	modified bool | ||||
| 	grammars map[string][]string | ||||
| 	Loaded   map[string]*Repository | ||||
|  | ||||
| 	progress *pb.ProgressBar | ||||
| 	wg       sync.WaitGroup | ||||
| 	queue    chan string | ||||
| 	mu       sync.Mutex | ||||
| } | ||||
|  | ||||
| func (conv *Converter) Load(src string) *Repository { | ||||
| 	if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { | ||||
| 		return LoadFromURL(src) | ||||
| 	} | ||||
| 	return LoadFromFilesystem(conv.root, src) | ||||
| } | ||||
|  | ||||
| func (conv *Converter) work() { | ||||
| 	for source := range conv.queue { | ||||
| 		repo := conv.Load(source) | ||||
|  | ||||
| 		conv.mu.Lock() | ||||
| 		conv.Loaded[source] = repo | ||||
| 		conv.mu.Unlock() | ||||
|  | ||||
| 		conv.progress.Increment() | ||||
| 	} | ||||
|  | ||||
| 	conv.wg.Done() | ||||
| } | ||||
|  | ||||
| func (conv *Converter) tmpScopes() map[string]bool { | ||||
| 	scopes := make(map[string]bool) | ||||
| 	for _, ary := range conv.grammars { | ||||
| 		for _, s := range ary { | ||||
| 			scopes[s] = true | ||||
| 		} | ||||
| 	} | ||||
| 	return scopes | ||||
| } | ||||
|  | ||||
| func (conv *Converter) AddGrammar(source string) error { | ||||
| 	repo := conv.Load(source) | ||||
| 	if len(repo.Files) == 0 { | ||||
| 		return fmt.Errorf("source '%s' contains no grammar files", source) | ||||
| 	} | ||||
|  | ||||
| 	conv.grammars[source] = repo.Scopes() | ||||
| 	conv.modified = true | ||||
|  | ||||
| 	knownScopes := conv.tmpScopes() | ||||
| 	repo.FixRules(knownScopes) | ||||
|  | ||||
| 	if len(repo.Errors) > 0 { | ||||
| 		fmt.Fprintf(os.Stderr, "The new grammar %s contains %d errors:\n", | ||||
| 			repo, len(repo.Errors)) | ||||
| 		for _, err := range repo.Errors { | ||||
| 			fmt.Fprintf(os.Stderr, "    - %s\n", err) | ||||
| 		} | ||||
| 		fmt.Fprintf(os.Stderr, "\n") | ||||
| 		return fmt.Errorf("failed to compile the given grammar") | ||||
| 	} | ||||
|  | ||||
| 	fmt.Printf("OK! added grammar source '%s'\n", source) | ||||
| 	for scope := range repo.Files { | ||||
| 		fmt.Printf("\tnew scope: %s\n", scope) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (conv *Converter) AllScopes() map[string]bool { | ||||
| 	// Map from scope -> Repository first to error check | ||||
| 	// possible duplicates | ||||
| 	allScopes := make(map[string]*Repository) | ||||
| 	for _, repo := range conv.Loaded { | ||||
| 		for scope := range repo.Files { | ||||
| 			if original := allScopes[scope]; original != nil { | ||||
| 				repo.Fail(&DuplicateScopeError{original, scope}) | ||||
| 			} else { | ||||
| 				allScopes[scope] = repo | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Convert to scope -> bool | ||||
| 	scopes := make(map[string]bool) | ||||
| 	for s := range allScopes { | ||||
| 		scopes[s] = true | ||||
| 	} | ||||
| 	return scopes | ||||
| } | ||||
|  | ||||
| func (conv *Converter) ConvertGrammars(update bool) error { | ||||
| 	conv.Loaded = make(map[string]*Repository) | ||||
| 	conv.queue = make(chan string, 128) | ||||
|  | ||||
| 	conv.progress = pb.New(len(conv.grammars)) | ||||
| 	conv.progress.Start() | ||||
|  | ||||
| 	for i := 0; i < runtime.NumCPU(); i++ { | ||||
| 		conv.wg.Add(1) | ||||
| 		go conv.work() | ||||
| 	} | ||||
|  | ||||
| 	for src := range conv.grammars { | ||||
| 		conv.queue <- src | ||||
| 	} | ||||
|  | ||||
| 	close(conv.queue) | ||||
| 	conv.wg.Wait() | ||||
|  | ||||
| 	done := fmt.Sprintf("done! processed %d grammars\n", len(conv.Loaded)) | ||||
| 	conv.progress.FinishPrint(done) | ||||
|  | ||||
| 	if update { | ||||
| 		conv.grammars = make(map[string][]string) | ||||
| 		conv.modified = true | ||||
| 	} | ||||
|  | ||||
| 	knownScopes := conv.AllScopes() | ||||
|  | ||||
| 	for source, repo := range conv.Loaded { | ||||
| 		repo.FixRules(knownScopes) | ||||
|  | ||||
| 		if update { | ||||
| 			conv.grammars[source] = repo.Scopes() | ||||
| 		} else { | ||||
| 			expected := conv.grammars[source] | ||||
| 			repo.CompareScopes(expected) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (conv *Converter) WriteProto(path string) error { | ||||
| 	library := grammar.Library{ | ||||
| 		Grammars: make(map[string]*grammar.Rule), | ||||
| 	} | ||||
|  | ||||
| 	for _, repo := range conv.Loaded { | ||||
| 		for scope, file := range repo.Files { | ||||
| 			library.Grammars[scope] = file.Rule | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	pb, err := proto.Marshal(&library) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return ioutil.WriteFile(path, pb, 0666) | ||||
| } | ||||
|  | ||||
| func (conv *Converter) writeJSONFile(path string, rule *grammar.Rule) error { | ||||
| 	j, err := os.Create(path) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer j.Close() | ||||
|  | ||||
| 	enc := json.NewEncoder(j) | ||||
| 	enc.SetIndent("", "  ") | ||||
| 	return enc.Encode(rule) | ||||
| } | ||||
|  | ||||
| func (conv *Converter) WriteJSON(rulePath string) error { | ||||
| 	if err := os.MkdirAll(rulePath, os.ModePerm); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	for _, repo := range conv.Loaded { | ||||
| 		for scope, file := range repo.Files { | ||||
| 			p := path.Join(rulePath, scope+".json") | ||||
| 			if err := conv.writeJSONFile(p, file.Rule); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (conv *Converter) WriteGrammarList() error { | ||||
| 	if !conv.modified { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	outyml, err := yaml.Marshal(conv.grammars) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	ymlpath := path.Join(conv.root, "grammars.yml") | ||||
| 	return ioutil.WriteFile(ymlpath, outyml, 0666) | ||||
| } | ||||
|  | ||||
| func (conv *Converter) Report() error { | ||||
| 	var failed []*Repository | ||||
| 	for _, repo := range conv.Loaded { | ||||
| 		if len(repo.Errors) > 0 { | ||||
| 			failed = append(failed, repo) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	sort.Slice(failed, func(i, j int) bool { | ||||
| 		return failed[i].Source < failed[j].Source | ||||
| 	}) | ||||
|  | ||||
| 	total := 0 | ||||
| 	for _, repo := range failed { | ||||
| 		fmt.Fprintf(os.Stderr, "- [ ] %s (%d errors)\n", repo, len(repo.Errors)) | ||||
| 		for _, err := range repo.Errors { | ||||
| 			fmt.Fprintf(os.Stderr, "    - [ ] %s\n", err) | ||||
| 		} | ||||
| 		fmt.Fprintf(os.Stderr, "\n") | ||||
| 		total += len(repo.Errors) | ||||
| 	} | ||||
|  | ||||
| 	if total > 0 { | ||||
| 		return fmt.Errorf("the grammar library contains %d errors", total) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func NewConverter(root string) (*Converter, error) { | ||||
| 	yml, err := ioutil.ReadFile(path.Join(root, "grammars.yml")) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	conv := &Converter{root: root} | ||||
|  | ||||
| 	if err := yaml.Unmarshal(yml, &conv.grammars); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return conv, nil | ||||
| } | ||||
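Taken together, converter.go is the heart of the new tool: NewConverter reads grammars.yml, ConvertGrammars fans the sources out to one worker per CPU, and the Write*/Report methods emit results. Below is a minimal driver sketch, not part of this diff; it assumes it is run from a Linguist checkout (so ./grammars.yml exists) and mirrors what the CLI's `compile` action above does.

package main

import (
	"log"

	"github.com/github/linguist/tools/grammars/compiler"
)

func main() {
	// Reads grammars.yml relative to the given Linguist root.
	conv, err := compiler.NewConverter(".")
	if err != nil {
		log.Fatal(err)
	}
	// false = check each repository's scopes against grammars.yml
	// instead of rewriting the list (that is what `update` does).
	if err := conv.ConvertGrammars(false); err != nil {
		log.Fatal(err)
	}
	// Write one <scope>.json per grammar, as the Ruby script used to.
	if err := conv.WriteJSON("grammars"); err != nil {
		log.Fatal(err)
	}
	// Report aggregates every repository's errors and fails if any exist.
	if err := conv.Report(); err != nil {
		log.Fatal(err)
	}
}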
tools/grammars/compiler/cson.go (21 lines, Normal file)
							| @@ -0,0 +1,21 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"os/exec" | ||||
| ) | ||||
|  | ||||
| func ConvertCSON(data []byte) ([]byte, error) { | ||||
| 	stdin := bytes.NewBuffer(data) | ||||
| 	stdout := &bytes.Buffer{} | ||||
|  | ||||
| 	cmd := exec.Command("csonc") | ||||
| 	cmd.Stdin = stdin | ||||
| 	cmd.Stdout = stdout | ||||
|  | ||||
| 	if err := cmd.Run(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return stdout.Bytes(), nil | ||||
| } | ||||
tools/grammars/compiler/data.go (29 lines, Normal file)
							| @@ -0,0 +1,29 @@ | ||||
| package compiler | ||||
|  | ||||
| var GrammarAliases = map[string]string{ | ||||
| 	"source.erb":         "text.html.erb", | ||||
| 	"source.cpp":         "source.c++", | ||||
| 	"source.less":        "source.css.less", | ||||
| 	"text.html.markdown": "source.gfm", | ||||
| 	"text.md":            "source.gfm", | ||||
| 	"source.php":         "text.html.php", | ||||
| 	"text.plain":         "", | ||||
| 	"source.asciidoc":    "text.html.asciidoc", | ||||
| 	"source.perl6":       "source.perl6fe", | ||||
| 	"source.css.scss":    "source.scss", | ||||
| } | ||||
|  | ||||
| var KnownFields = map[string]bool{ | ||||
| 	"comment":            true, | ||||
| 	"uuid":               true, | ||||
| 	"author":             true, | ||||
| 	"comments":           true, | ||||
| 	"macros":             true, | ||||
| 	"fileTypes":          true, | ||||
| 	"firstLineMatch":     true, | ||||
| 	"keyEquivalent":      true, | ||||
| 	"foldingStopMarker":  true, | ||||
| 	"foldingStartMarker": true, | ||||
| 	"foldingEndMarker":   true, | ||||
| 	"limitLineLength":    true, | ||||
| } | ||||
tools/grammars/compiler/errors.go (85 lines, Normal file)
							| @@ -0,0 +1,85 @@ | ||||
| package compiler | ||||
|  | ||||
| import "fmt" | ||||
| import "strings" | ||||
|  | ||||
| type ConversionError struct { | ||||
| 	Path string | ||||
| 	Err  error | ||||
| } | ||||
|  | ||||
| func (err *ConversionError) Error() string { | ||||
| 	return fmt.Sprintf( | ||||
| 		"Grammar conversion failed. File `%s` failed to parse: %s", | ||||
| 		err.Path, err.Err) | ||||
| } | ||||
|  | ||||
| type DuplicateScopeError struct { | ||||
| 	Original  *Repository | ||||
| 	Duplicate string | ||||
| } | ||||
|  | ||||
| func (err *DuplicateScopeError) Error() string { | ||||
| 	return fmt.Sprintf( | ||||
| 		"Duplicate scope in repository: scope `%s` was already defined in %s", | ||||
| 		err.Duplicate, err.Original) | ||||
| } | ||||
|  | ||||
| type MissingScopeError struct { | ||||
| 	Scope string | ||||
| } | ||||
|  | ||||
| func (err *MissingScopeError) Error() string { | ||||
| 	return fmt.Sprintf( | ||||
| 		"Missing scope in repository: `%s` is listed in grammars.yml but cannot be found", | ||||
| 		err.Scope) | ||||
| } | ||||
|  | ||||
| type UnexpectedScopeError struct { | ||||
| 	File  *LoadedFile | ||||
| 	Scope string | ||||
| } | ||||
|  | ||||
| func (err *UnexpectedScopeError) Error() string { | ||||
| 	return fmt.Sprintf( | ||||
| 		"Unexpected scope in repository: `%s` declared in %s was not listed in grammars.yml", | ||||
| 		err.Scope, err.File) | ||||
| } | ||||
|  | ||||
| type MissingIncludeError struct { | ||||
| 	File    *LoadedFile | ||||
| 	Include string | ||||
| } | ||||
|  | ||||
| func (err *MissingIncludeError) Error() string { | ||||
| 	return fmt.Sprintf( | ||||
| 		"Missing include in grammar: %s attempts to include `%s` but the scope cannot be found", | ||||
| 		err.File, err.Include) | ||||
| } | ||||
|  | ||||
| type UnknownKeysError struct { | ||||
| 	File *LoadedFile | ||||
| 	Keys []string | ||||
| } | ||||
|  | ||||
| func (err *UnknownKeysError) Error() string { | ||||
| 	var keys []string | ||||
| 	for _, k := range err.Keys { | ||||
| 		keys = append(keys, fmt.Sprintf("`%s`", k)) | ||||
| 	} | ||||
|  | ||||
| 	return fmt.Sprintf( | ||||
| 		"Unknown keys in grammar: %s contains invalid keys (%s)", | ||||
| 		err.File, strings.Join(keys, ", ")) | ||||
| } | ||||
|  | ||||
| type InvalidRegexError struct { | ||||
| 	File *LoadedFile | ||||
| 	Err  error | ||||
| } | ||||
|  | ||||
| func (err *InvalidRegexError) Error() string { | ||||
| 	return fmt.Sprintf( | ||||
| 		"Invalid regex in grammar: %s contains a malformed regex (%s)", | ||||
| 		err.File, err.Err) | ||||
| } | ||||
tools/grammars/compiler/loader.go (124 lines, Normal file)
							| @@ -0,0 +1,124 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
|  | ||||
| 	grammar "github.com/github/linguist/tools/grammars/proto" | ||||
| ) | ||||
|  | ||||
| type LoadedFile struct { | ||||
| 	Path string | ||||
| 	Rule *grammar.Rule | ||||
| } | ||||
|  | ||||
| func (f *LoadedFile) String() string { | ||||
| 	return fmt.Sprintf("`%s` (in `%s`)", f.Rule.ScopeName, f.Path) | ||||
| } | ||||
|  | ||||
| type Repository struct { | ||||
| 	Source   string | ||||
| 	Upstream string | ||||
| 	Files    map[string]*LoadedFile | ||||
| 	Errors   []error | ||||
| } | ||||
|  | ||||
| func newRepository(src string) *Repository { | ||||
| 	return &Repository{ | ||||
| 		Source: src, | ||||
| 		Files:  make(map[string]*LoadedFile), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (repo *Repository) String() string { | ||||
| 	str := fmt.Sprintf("repository `%s`", repo.Source) | ||||
| 	if repo.Upstream != "" { | ||||
| 		str = str + fmt.Sprintf(" (from %s)", repo.Upstream) | ||||
| 	} | ||||
| 	return str | ||||
| } | ||||
|  | ||||
| func (repo *Repository) Fail(err error) { | ||||
| 	repo.Errors = append(repo.Errors, err) | ||||
| } | ||||
|  | ||||
| func (repo *Repository) AddFile(path string, rule *grammar.Rule, uk []string) { | ||||
| 	file := &LoadedFile{ | ||||
| 		Path: path, | ||||
| 		Rule: rule, | ||||
| 	} | ||||
|  | ||||
| 	repo.Files[rule.ScopeName] = file | ||||
| 	if len(uk) > 0 { | ||||
| 		repo.Fail(&UnknownKeysError{file, uk}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func toMap(slice []string) map[string]bool { | ||||
| 	m := make(map[string]bool) | ||||
| 	for _, s := range slice { | ||||
| 		m[s] = true | ||||
| 	} | ||||
| 	return m | ||||
| } | ||||
|  | ||||
| func (repo *Repository) CompareScopes(scopes []string) { | ||||
| 	expected := toMap(scopes) | ||||
|  | ||||
| 	for scope, file := range repo.Files { | ||||
| 		if !expected[scope] { | ||||
| 			repo.Fail(&UnexpectedScopeError{file, scope}) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for scope := range expected { | ||||
| 		if _, ok := repo.Files[scope]; !ok { | ||||
| 			repo.Fail(&MissingScopeError{scope}) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (repo *Repository) FixRules(knownScopes map[string]bool) { | ||||
| 	for _, file := range repo.Files { | ||||
| 		w := walker{ | ||||
| 			File:    file, | ||||
| 			Known:   knownScopes, | ||||
| 			Missing: make(map[string]bool), | ||||
| 		} | ||||
|  | ||||
| 		w.walk(file.Rule) | ||||
| 		repo.Errors = append(repo.Errors, w.Errors...) | ||||
|  | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (repo *Repository) Scopes() (scopes []string) { | ||||
| 	for s := range repo.Files { | ||||
| 		scopes = append(scopes, s) | ||||
| 	} | ||||
| 	sort.Strings(scopes) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func isValidGrammar(path string, info os.FileInfo) bool { | ||||
| 	if info.IsDir() { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	dir := filepath.Dir(path) | ||||
| 	ext := filepath.Ext(path) | ||||
|  | ||||
| 	switch strings.ToLower(ext) { | ||||
| 	case ".plist": | ||||
| 		return strings.HasSuffix(dir, "/Syntaxes") | ||||
| 	case ".tmlanguage", ".yaml-tmlanguage": | ||||
| 		return true | ||||
| 	case ".cson", ".json": | ||||
| 		return strings.HasSuffix(dir, "/grammars") | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
tools/grammars/compiler/loader_fs.go (80 lines, Normal file)
							| @@ -0,0 +1,80 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"os/exec" | ||||
| 	"path" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type fsLoader struct { | ||||
| 	*Repository | ||||
| 	abspath string | ||||
| } | ||||
|  | ||||
| func (l *fsLoader) findGrammars() (files []string, err error) { | ||||
| 	err = filepath.Walk(l.abspath, | ||||
| 		func(path string, info os.FileInfo, err error) error { | ||||
| 			if err == nil && isValidGrammar(path, info) { | ||||
| 				files = append(files, path) | ||||
| 			} | ||||
| 			return nil | ||||
| 		}) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (l *fsLoader) load() { | ||||
| 	grammars, err := l.findGrammars() | ||||
| 	if err != nil { | ||||
| 		l.Fail(err) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	for _, path := range grammars { | ||||
| 		data, err := ioutil.ReadFile(path) | ||||
| 		if err != nil { | ||||
| 			l.Fail(err) | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		if rel, err := filepath.Rel(l.abspath, path); err == nil { | ||||
| 			path = rel | ||||
| 		} | ||||
|  | ||||
| 		rule, unknown, err := ConvertProto(filepath.Ext(path), data) | ||||
| 		if err != nil { | ||||
| 			l.Fail(&ConversionError{path, err}) | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		if _, ok := l.Files[rule.ScopeName]; ok { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		l.AddFile(path, rule, unknown) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func gitRemoteName(path string) (string, error) { | ||||
| 	remote, err := exec.Command("git", "-C", path, "remote", "get-url", "origin").Output() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return strings.TrimSpace(string(remote)), nil | ||||
| } | ||||
|  | ||||
| func LoadFromFilesystem(root, src string) *Repository { | ||||
| 	loader := fsLoader{ | ||||
| 		Repository: newRepository(src), | ||||
| 		abspath:    path.Join(root, src), | ||||
| 	} | ||||
| 	loader.load() | ||||
|  | ||||
| 	if ups, err := gitRemoteName(loader.abspath); err == nil { | ||||
| 		loader.Repository.Upstream = ups | ||||
| 	} | ||||
|  | ||||
| 	return loader.Repository | ||||
| } | ||||
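LoadFromFilesystem walks a vendored submodule for grammar files and, when possible, records its git origin URL as Upstream. A hedged sketch combining it with CompareScopes from loader.go; the root path and scope name here are hypothetical, chosen only to illustrate the call shapes:

package main

import (
	"fmt"

	"github.com/github/linguist/tools/grammars/compiler"
)

func main() {
	// Hypothetical checkout path and submodule; any vendored grammar works.
	repo := compiler.LoadFromFilesystem("/path/to/linguist", "vendor/grammars/language-yara")
	// Flag any scope found but not listed, and any listed scope not found.
	repo.CompareScopes([]string{"source.yara"})
	for _, err := range repo.Errors {
		fmt.Println(err)
	}
}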
tools/grammars/compiler/loader_url.go (93 lines, Normal file)
							| @@ -0,0 +1,93 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"archive/tar" | ||||
| 	"compress/gzip" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"net/http" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type urlLoader struct { | ||||
| 	*Repository | ||||
| } | ||||
|  | ||||
| func (l *urlLoader) loadTarball(r io.Reader) { | ||||
| 	gzf, err := gzip.NewReader(r) | ||||
| 	if err != nil { | ||||
| 		l.Fail(err) | ||||
| 		return | ||||
| 	} | ||||
| 	defer gzf.Close() | ||||
|  | ||||
| 	tarReader := tar.NewReader(gzf) | ||||
| 	for { ||||
| 		header, err := tarReader.Next() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			if err != io.EOF { | ||||
| 				l.Fail(err) | ||||
| 			} | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		if isValidGrammar(header.Name, header.FileInfo()) { | ||||
| 			data, err := ioutil.ReadAll(tarReader) | ||||
| 			if err != nil { | ||||
| 				l.Fail(err) | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			ext := filepath.Ext(header.Name) | ||||
| 			rule, unknown, err := ConvertProto(ext, data) | ||||
| 			if err != nil { | ||||
| 				l.Fail(&ConversionError{header.Name, err}) | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			if _, ok := l.Files[rule.ScopeName]; ok { | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			l.AddFile(header.Name, rule, unknown) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (l *urlLoader) load() { | ||||
| 	res, err := http.Get(l.Source) | ||||
| 	if err != nil { | ||||
| 		l.Fail(err) | ||||
| 		return | ||||
| 	} | ||||
| 	defer res.Body.Close() | ||||
|  | ||||
| 	if strings.HasSuffix(l.Source, ".tar.gz") { | ||||
| 		l.loadTarball(res.Body) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	data, err := ioutil.ReadAll(res.Body) | ||||
| 	if err != nil { | ||||
| 		l.Fail(err) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	ext := filepath.Ext(l.Source) | ||||
| 	filename := filepath.Base(l.Source) | ||||
| 	rule, unknown, err := ConvertProto(ext, data) | ||||
| 	if err != nil { | ||||
| 		l.Fail(&ConversionError{filename, err}) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	l.AddFile(filename, rule, unknown) | ||||
| } | ||||
|  | ||||
| func LoadFromURL(src string) *Repository { | ||||
| 	loader := urlLoader{newRepository(src)} | ||||
| 	loader.load() | ||||
| 	return loader.Repository | ||||
| } | ||||
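For reference, this is the other half of Converter.Load: URLs ending in .tar.gz are streamed through loadTarball, anything else is treated as a single grammar file. A usage sketch follows; the tarball URL is illustrative, not taken from this diff:

package main

import (
	"fmt"

	"github.com/github/linguist/tools/grammars/compiler"
)

func main() {
	// Illustrative URL: a GitHub archive tarball of a grammar repository.
	repo := compiler.LoadFromURL("https://github.com/atom/language-ruby/archive/master.tar.gz")
	for _, err := range repo.Errors {
		fmt.Println("error:", err) // conversion errors recorded via Fail
	}
	for scope, file := range repo.Files {
		fmt.Printf("%s defined in %s\n", scope, file.Path)
	}
}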
tools/grammars/compiler/pcre.go (68 lines, Normal file)
							| @@ -0,0 +1,68 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/github/linguist/tools/grammars/pcre" | ||||
| ) | ||||
|  | ||||
| type replacement struct { | ||||
| 	pos int | ||||
| 	len int | ||||
| 	val string | ||||
| } | ||||
|  | ||||
| func fixRegex(re string) (string, bool) { | ||||
| 	var ( | ||||
| 		replace     []replacement | ||||
| 		escape      = false | ||||
| 		hasBackRefs = false | ||||
| 	) | ||||
|  | ||||
| 	for i, ch := range re { | ||||
| 		if escape { | ||||
| 			if ch == 'h' { | ||||
| 				replace = append(replace, replacement{i - 1, 2, "[[:xdigit:]]"}) | ||||
| 			} | ||||
| 			if '0' <= ch && ch <= '9' { | ||||
| 				hasBackRefs = true | ||||
| 			} | ||||
| 		} | ||||
| 		escape = !escape && ch == '\\' | ||||
| 	} | ||||
|  | ||||
| 	if len(replace) > 0 { | ||||
| 		reb := []byte(re) | ||||
| 		offset := 0 | ||||
| 		for _, repl := range replace { | ||||
| 			reb = append( | ||||
| 				reb[:offset+repl.pos], | ||||
| 				append([]byte(repl.val), reb[offset+repl.pos+repl.len:]...)...) | ||||
| 			offset += len(repl.val) - repl.len | ||||
| 		} | ||||
| 		return string(reb), hasBackRefs | ||||
| 	} | ||||
|  | ||||
| 	return re, hasBackRefs | ||||
| } | ||||
|  | ||||
| func CheckPCRE(re string) (string, error) { | ||||
| 	if re == "" { | ||||
| 		return "", nil | ||||
| 	} | ||||
| 	if len(re) > 32*1024 { | ||||
| 		return "", fmt.Errorf( | ||||
| 			"regex %s: definition too long (%d bytes)", | ||||
| 			pcre.RegexPP(re), len(re)) | ||||
| 	} | ||||
|  | ||||
| 	re, hasBackRefs := fixRegex(re) | ||||
| 	if !hasBackRefs { | ||||
| 		if err := pcre.CheckRegexp(re, pcre.DefaultFlags); err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
| 	return re, nil | ||||
| } | ||||
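A short sketch of what the two functions above do (in-package, since fixRegex is unexported): \h is rewritten to a POSIX character class that this PCRE setup accepts, and a pattern containing back-references skips the test compile entirely, presumably because an end pattern may reference captures from its begin pattern and would not compile in isolation. The function name is illustrative:

    func exampleFixRegex() {
    	re, hasBackRefs := fixRegex(`\h+(\w) \1`)
    	// re == `[[:xdigit:]]+(\w) \1` and hasBackRefs == true,
    	// so CheckPCRE would return it without the pcre_compile round-trip.
    	_, _ = re, hasBackRefs

    	if _, err := CheckPCRE(`foo(`); err != nil {
    		// an unbalanced pattern surfaces as a *pcre.CompileError
    	}
    }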
							
								
								
									
27  tools/grammars/compiler/pcre_test.go  Normal file
							| @@ -0,0 +1,27 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"testing" | ||||
| ) | ||||
|  | ||||
| func Test_fixRegex(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		re   string | ||||
| 		want string | ||||
| 	}{ | ||||
| 		{"foobar", "foobar"}, | ||||
| 		{`testing\h`, "testing[[:xdigit:]]"}, | ||||
| 		{`\htest`, `[[:xdigit:]]test`}, | ||||
| 		{`abc\hdef`, `abc[[:xdigit:]]def`}, | ||||
| 		{`\\\htest`, `\\[[:xdigit:]]test`}, | ||||
| 		{`\\htest`, `\\htest`}, | ||||
| 		{`\h\h\h\h`, `[[:xdigit:]][[:xdigit:]][[:xdigit:]][[:xdigit:]]`}, | ||||
| 		{`abc\hdef\hghi\h`, `abc[[:xdigit:]]def[[:xdigit:]]ghi[[:xdigit:]]`}, | ||||
| 	} | ||||
| 	for _, tt := range tests { | ||||
| 		got, _ := fixRegex(tt.re) | ||||
| 		if got != tt.want { | ||||
| 			t.Errorf("fixRegex() got = %v, want %v", got, tt.want) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
96  tools/grammars/compiler/proto.go  Normal file
							| @@ -0,0 +1,96 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
|  | ||||
| 	grammar "github.com/github/linguist/tools/grammars/proto" | ||||
| 	"github.com/groob/plist" | ||||
| 	"github.com/mitchellh/mapstructure" | ||||
| 	yaml "gopkg.in/yaml.v2" | ||||
| ) | ||||
|  | ||||
| func looseDecoder(f reflect.Kind, t reflect.Kind, data interface{}) (interface{}, error) { | ||||
| 	dataVal := reflect.ValueOf(data) | ||||
| 	switch t { | ||||
| 	case reflect.Bool: | ||||
| 		switch f { | ||||
| 		case reflect.Bool: | ||||
| 			return dataVal.Bool(), nil | ||||
| 		case reflect.Float32, reflect.Float64: | ||||
| 			return (int(dataVal.Float()) != 0), nil | ||||
| 		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | ||||
| 			return (dataVal.Int() != 0), nil | ||||
| 		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: | ||||
| 			return (dataVal.Uint() != 0), nil | ||||
| 		case reflect.String: | ||||
| 			switch dataVal.String() { | ||||
| 			case "1": | ||||
| 				return true, nil | ||||
| 			case "0": | ||||
| 				return false, nil | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return data, nil | ||||
| } | ||||
|  | ||||
| func filterUnusedKeys(keys []string) (out []string) { | ||||
| 	for _, k := range keys { | ||||
| 		parts := strings.Split(k, ".") | ||||
| 		field := parts[len(parts)-1] | ||||
| 		if !KnownFields[field] { | ||||
| 			out = append(out, k) | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func ConvertProto(ext string, data []byte) (*grammar.Rule, []string, error) { | ||||
| 	var ( | ||||
| 		raw map[string]interface{} | ||||
| 		out grammar.Rule | ||||
| 		err error | ||||
| 		md  mapstructure.Metadata | ||||
| 	) | ||||
|  | ||||
| 	switch strings.ToLower(ext) { | ||||
| 	case ".plist", ".tmlanguage": | ||||
| 		err = plist.Unmarshal(data, &raw) | ||||
| 	case ".yaml-tmlanguage": | ||||
| 		err = yaml.Unmarshal(data, &raw) | ||||
| 	case ".cson": | ||||
| 		data, err = ConvertCSON(data) | ||||
| 		if err == nil { | ||||
| 			err = json.Unmarshal(data, &raw) | ||||
| 		} | ||||
| 	case ".json": | ||||
| 		err = json.Unmarshal(data, &raw) | ||||
| 	default: | ||||
| 		err = fmt.Errorf("grammars: unsupported extension '%s'", ext) | ||||
| 	} | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
|  | ||||
| 	config := mapstructure.DecoderConfig{ | ||||
| 		Result:     &out, | ||||
| 		Metadata:   &md, | ||||
| 		DecodeHook: looseDecoder, | ||||
| 	} | ||||
|  | ||||
| 	decoder, err := mapstructure.NewDecoder(&config) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
|  | ||||
| 	if err := decoder.Decode(raw); err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &out, filterUnusedKeys(md.Unused), nil | ||||
| } | ||||
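A hedged in-package sketch of ConvertProto on a tiny JSON grammar. The looseDecoder hook is what lets a sloppy "disabled": "1" land in the boolean Disabled field, and keys the schema does not recognize come back through mapstructure's metadata (assuming "fancy" is not in KnownFields):

    func exampleConvertProto() {
    	src := []byte(`{
    		"scopeName": "source.example",
    		"disabled": "1",
    		"fancy": 42,
    		"patterns": [{"match": "\\bfoo\\b"}]
    	}`)

    	rule, unknown, err := ConvertProto(".json", src)
    	if err != nil {
    		panic(err)
    	}
    	// rule.ScopeName == "source.example", rule.Disabled == true
    	// (the string "1" was coerced by looseDecoder), and unknown
    	// lists "fancy".
    	_, _ = rule, unknown
    }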
							
								
								
									
79  tools/grammars/compiler/walker.go  Normal file
							| @@ -0,0 +1,79 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"strings" | ||||
|  | ||||
| 	grammar "github.com/github/linguist/tools/grammars/proto" | ||||
| ) | ||||
|  | ||||
| func (w *walker) checkInclude(rule *grammar.Rule) { | ||||
| 	include := rule.Include | ||||
|  | ||||
| 	if include == "" || include[0] == '#' || include[0] == '$' { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if alias, ok := GrammarAliases[include]; ok { | ||||
| 		rule.Include = alias | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	include = strings.Split(include, "#")[0] | ||||
| 	ok := w.Known[include] | ||||
| 	if !ok { | ||||
| 		if !w.Missing[include] { | ||||
| 			w.Missing[include] = true | ||||
| 			w.Errors = append(w.Errors, &MissingIncludeError{w.File, include}) | ||||
| 		} | ||||
| 		rule.Include = "" | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *walker) checkRegexps(rule *grammar.Rule) { | ||||
| 	check := func(re string) string { | ||||
| 		re2, err := CheckPCRE(re) | ||||
| 		if err != nil { | ||||
| 			w.Errors = append(w.Errors, &InvalidRegexError{w.File, err}) | ||||
| 		} | ||||
| 		return re2 | ||||
| 	} | ||||
|  | ||||
| 	rule.Match = check(rule.Match) | ||||
| 	rule.Begin = check(rule.Begin) | ||||
| 	rule.While = check(rule.While) | ||||
| 	rule.End = check(rule.End) | ||||
| } | ||||
|  | ||||
| func (w *walker) walk(rule *grammar.Rule) { | ||||
| 	w.checkInclude(rule) | ||||
| 	w.checkRegexps(rule) | ||||
|  | ||||
| 	for _, rule := range rule.Patterns { | ||||
| 		w.walk(rule) | ||||
| 	} | ||||
| 	for _, rule := range rule.Captures { | ||||
| 		w.walk(rule) | ||||
| 	} | ||||
| 	for _, rule := range rule.BeginCaptures { | ||||
| 		w.walk(rule) | ||||
| 	} | ||||
| 	for _, rule := range rule.WhileCaptures { | ||||
| 		w.walk(rule) | ||||
| 	} | ||||
| 	for _, rule := range rule.EndCaptures { | ||||
| 		w.walk(rule) | ||||
| 	} | ||||
| 	for _, rule := range rule.Repository { | ||||
| 		w.walk(rule) | ||||
| 	} | ||||
| 	for _, rule := range rule.Injections { | ||||
| 		w.walk(rule) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type walker struct { | ||||
| 	File    *LoadedFile | ||||
| 	Known   map[string]bool | ||||
| 	Missing map[string]bool | ||||
| 	Errors  []error | ||||
| } | ||||
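For context, a minimal sketch of how the walker might be wired up; the scope name and the LoadedFile/root values are placeholders, and Known would normally hold every scope name available to the library:

    func exampleWalk(file *LoadedFile, root *grammar.Rule) []error {
    	w := walker{
    		File:    file,
    		Known:   map[string]bool{"source.js": true},
    		Missing: make(map[string]bool),
    	}
    	// Unresolvable includes are reported once each and blanked out;
    	// every match/begin/while/end regex is vetted through CheckPCRE.
    	w.walk(root)
    	return w.Errors
    }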
							
								
								
									
11  tools/grammars/docker/build  Executable file
							| @@ -0,0 +1,11 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| set -ex | ||||
| cd "$(dirname "$0")/.." | ||||
|  | ||||
| image=linguist/grammar-compiler | ||||
| docker build -t $image . | ||||
|  | ||||
| if [ "$1" = "--push" ]; then | ||||
|     docker push $image | ||||
| fi | ||||
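For reference, two example invocations (paths relative to a linguist checkout; the image name comes from the script itself):

    tools/grammars/docker/build          # build the linguist/grammar-compiler image
    tools/grammars/docker/build --push   # build, then push it to the registry

Because of the cd "$(dirname "$0")/.." line, the script can be run from any working directory.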
							
								
								
									
53  tools/grammars/pcre/pcre.go  Normal file
							| @@ -0,0 +1,53 @@ | ||||
| package pcre | ||||
|  | ||||
| /* | ||||
| #cgo LDFLAGS: -lpcre | ||||
| #include <stdlib.h> // declares free() for the C.free calls below | ||||
| #include <pcre.h> | ||||
| */ | ||||
| import "C" | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| func RegexPP(re string) string { | ||||
| 	if len(re) > 32 { | ||||
| 		re = fmt.Sprintf("\"`%s`...\"", re[:32]) | ||||
| 	} else { | ||||
| 		re = fmt.Sprintf("\"`%s`\"", re) | ||||
| 	} | ||||
| 	return strings.Replace(re, "\n", "", -1) | ||||
| } | ||||
|  | ||||
| type CompileError struct { | ||||
| 	Pattern string | ||||
| 	Message string | ||||
| 	Offset  int | ||||
| } | ||||
|  | ||||
| func (e *CompileError) Error() string { | ||||
| 	return fmt.Sprintf("regex %s: %s (at offset %d)", | ||||
| 		RegexPP(e.Pattern), e.Message, e.Offset) | ||||
| } | ||||
|  | ||||
| const DefaultFlags = int(C.PCRE_DUPNAMES | C.PCRE_UTF8 | C.PCRE_NEWLINE_ANYCRLF) | ||||
|  | ||||
| func CheckRegexp(pattern string, flags int) error { | ||||
| 	pattern1 := C.CString(pattern) | ||||
| 	defer C.free(unsafe.Pointer(pattern1)) | ||||
|  | ||||
| 	var errptr *C.char | ||||
| 	var erroffset C.int | ||||
| 	ptr := C.pcre_compile(pattern1, C.int(flags), &errptr, &erroffset, nil) | ||||
| 	if ptr == nil { | ||||
| 		return &CompileError{ | ||||
| 			Pattern: pattern, | ||||
| 			Message: C.GoString(errptr), | ||||
| 			Offset:  int(erroffset), | ||||
| 		} | ||||
| 	} | ||||
| 	C.free(unsafe.Pointer(ptr)) | ||||
| 	return nil | ||||
| } | ||||
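A hedged usage sketch for the binding above; the pattern is deliberately broken so the CompileError path fires, and the exact PCRE message and offset shown in the comment are illustrative:

    package main

    import (
    	"fmt"

    	"github.com/github/linguist/tools/grammars/pcre"
    )

    func main() {
    	if err := pcre.CheckRegexp(`(?<year>\d{4}`, pcre.DefaultFlags); err != nil {
    		// e.g. regex "`(?<year>\d{4}`": missing ) (at offset 13)
    		fmt.Println(err)
    	}
    }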
							
								
								
									
239  tools/grammars/proto/grammar.pb.go  Normal file
							| @@ -0,0 +1,239 @@ | ||||
| // Code generated by protoc-gen-go. DO NOT EDIT. | ||||
| // source: proto/grammar.proto | ||||
|  | ||||
| /* | ||||
| Package grammar is a generated protocol buffer package. | ||||
|  | ||||
| It is generated from these files: | ||||
| 	proto/grammar.proto | ||||
|  | ||||
| It has these top-level messages: | ||||
| 	Rule | ||||
| 	Library | ||||
| */ | ||||
| package grammar | ||||
|  | ||||
| import proto "github.com/golang/protobuf/proto" | ||||
| import fmt "fmt" | ||||
| import math "math" | ||||
|  | ||||
| // Reference imports to suppress errors if they are not otherwise used. | ||||
| var _ = proto.Marshal | ||||
| var _ = fmt.Errorf | ||||
| var _ = math.Inf | ||||
|  | ||||
| // This is a compile-time assertion to ensure that this generated file | ||||
| // is compatible with the proto package it is being compiled against. | ||||
| // A compilation error at this line likely means your copy of the | ||||
| // proto package needs to be updated. | ||||
| const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | ||||
|  | ||||
| type Rule struct { | ||||
| 	Name                string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` | ||||
| 	ScopeName           string           `protobuf:"bytes,2,opt,name=scopeName" json:"scopeName,omitempty"` | ||||
| 	ContentName         string           `protobuf:"bytes,3,opt,name=contentName" json:"contentName,omitempty"` | ||||
| 	Match               string           `protobuf:"bytes,4,opt,name=match" json:"match,omitempty"` | ||||
| 	Begin               string           `protobuf:"bytes,5,opt,name=begin" json:"begin,omitempty"` | ||||
| 	While               string           `protobuf:"bytes,6,opt,name=while" json:"while,omitempty"` | ||||
| 	End                 string           `protobuf:"bytes,7,opt,name=end" json:"end,omitempty"` | ||||
| 	Include             string           `protobuf:"bytes,8,opt,name=include" json:"include,omitempty"` | ||||
| 	Patterns            []*Rule          `protobuf:"bytes,9,rep,name=patterns" json:"patterns,omitempty"` | ||||
| 	Captures            map[string]*Rule `protobuf:"bytes,10,rep,name=captures" json:"captures,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | ||||
| 	BeginCaptures       map[string]*Rule `protobuf:"bytes,11,rep,name=beginCaptures" json:"beginCaptures,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | ||||
| 	WhileCaptures       map[string]*Rule `protobuf:"bytes,12,rep,name=whileCaptures" json:"whileCaptures,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | ||||
| 	EndCaptures         map[string]*Rule `protobuf:"bytes,13,rep,name=endCaptures" json:"endCaptures,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | ||||
| 	Repository          map[string]*Rule `protobuf:"bytes,14,rep,name=repository" json:"repository,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | ||||
| 	Injections          map[string]*Rule `protobuf:"bytes,15,rep,name=injections" json:"injections,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | ||||
| 	Disabled            bool             `protobuf:"varint,16,opt,name=disabled" json:"disabled,omitempty"` | ||||
| 	ApplyEndPatternLast bool             `protobuf:"varint,17,opt,name=applyEndPatternLast" json:"applyEndPatternLast,omitempty"` | ||||
| 	IncludeResetBase    bool             `protobuf:"varint,18,opt,name=includeResetBase" json:"includeResetBase,omitempty"` | ||||
| } | ||||
|  | ||||
| func (m *Rule) Reset()                    { *m = Rule{} } | ||||
| func (m *Rule) String() string            { return proto.CompactTextString(m) } | ||||
| func (*Rule) ProtoMessage()               {} | ||||
| func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | ||||
|  | ||||
| func (m *Rule) GetName() string { | ||||
| 	if m != nil { | ||||
| 		return m.Name | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetScopeName() string { | ||||
| 	if m != nil { | ||||
| 		return m.ScopeName | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetContentName() string { | ||||
| 	if m != nil { | ||||
| 		return m.ContentName | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetMatch() string { | ||||
| 	if m != nil { | ||||
| 		return m.Match | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetBegin() string { | ||||
| 	if m != nil { | ||||
| 		return m.Begin | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetWhile() string { | ||||
| 	if m != nil { | ||||
| 		return m.While | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetEnd() string { | ||||
| 	if m != nil { | ||||
| 		return m.End | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetInclude() string { | ||||
| 	if m != nil { | ||||
| 		return m.Include | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetPatterns() []*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.Patterns | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetCaptures() map[string]*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.Captures | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetBeginCaptures() map[string]*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.BeginCaptures | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetWhileCaptures() map[string]*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.WhileCaptures | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetEndCaptures() map[string]*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.EndCaptures | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetRepository() map[string]*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.Repository | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetInjections() map[string]*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.Injections | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetDisabled() bool { | ||||
| 	if m != nil { | ||||
| 		return m.Disabled | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetApplyEndPatternLast() bool { | ||||
| 	if m != nil { | ||||
| 		return m.ApplyEndPatternLast | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (m *Rule) GetIncludeResetBase() bool { | ||||
| 	if m != nil { | ||||
| 		return m.IncludeResetBase | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| type Library struct { | ||||
| 	Grammars map[string]*Rule `protobuf:"bytes,1,rep,name=grammars" json:"grammars,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | ||||
| } | ||||
|  | ||||
| func (m *Library) Reset()                    { *m = Library{} } | ||||
| func (m *Library) String() string            { return proto.CompactTextString(m) } | ||||
| func (*Library) ProtoMessage()               {} | ||||
| func (*Library) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } | ||||
|  | ||||
| func (m *Library) GetGrammars() map[string]*Rule { | ||||
| 	if m != nil { | ||||
| 		return m.Grammars | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func init() { | ||||
| 	proto.RegisterType((*Rule)(nil), "grammar.Rule") | ||||
| 	proto.RegisterType((*Library)(nil), "grammar.Library") | ||||
| } | ||||
|  | ||||
| func init() { proto.RegisterFile("proto/grammar.proto", fileDescriptor0) } | ||||
|  | ||||
| var fileDescriptor0 = []byte{ | ||||
| 	// 486 bytes of a gzipped FileDescriptorProto | ||||
| 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcb, 0x8e, 0xd3, 0x30, | ||||
| 	0x14, 0x55, 0x66, 0xda, 0x69, 0x7a, 0x4b, 0x99, 0x72, 0x87, 0x85, 0x55, 0x1e, 0x8a, 0x86, 0x4d, | ||||
| 	0x61, 0x51, 0x10, 0x2c, 0x40, 0x23, 0x21, 0xa1, 0x41, 0x05, 0x81, 0xca, 0x43, 0xd9, 0xb0, 0x76, | ||||
| 	0x13, 0x6b, 0x26, 0x90, 0x3a, 0x91, 0xed, 0x82, 0xf2, 0x19, 0x7c, 0x19, 0xbf, 0x84, 0x7c, 0xed, | ||||
| 	0xa6, 0x49, 0xdb, 0x5d, 0x76, 0xbe, 0xe7, 0x25, 0x3b, 0x3e, 0x0e, 0x5c, 0x94, 0xaa, 0x30, 0xc5, | ||||
| 	0xf3, 0x1b, 0xc5, 0xd7, 0x6b, 0xae, 0xe6, 0x34, 0xe1, 0xc0, 0x8f, 0x97, 0xff, 0x86, 0xd0, 0x8b, | ||||
| 	0x37, 0xb9, 0x40, 0x84, 0x9e, 0xe4, 0x6b, 0xc1, 0x82, 0x28, 0x98, 0x0d, 0x63, 0x5a, 0xe3, 0x43, | ||||
| 	0x18, 0xea, 0xa4, 0x28, 0xc5, 0x57, 0x4b, 0x9c, 0x10, 0xb1, 0x03, 0x30, 0x82, 0x51, 0x52, 0x48, | ||||
| 	0x23, 0xa4, 0x21, 0xfe, 0x94, 0xf8, 0x26, 0x84, 0xf7, 0xa1, 0xbf, 0xe6, 0x26, 0xb9, 0x65, 0x3d, | ||||
| 	0xe2, 0xdc, 0x60, 0xd1, 0x95, 0xb8, 0xc9, 0x24, 0xeb, 0x3b, 0x94, 0x06, 0x8b, 0xfe, 0xb9, 0xcd, | ||||
| 	0x72, 0xc1, 0xce, 0x1c, 0x4a, 0x03, 0x4e, 0xe0, 0x54, 0xc8, 0x94, 0x0d, 0x08, 0xb3, 0x4b, 0x64, | ||||
| 	0x30, 0xc8, 0x64, 0x92, 0x6f, 0x52, 0xc1, 0x42, 0x42, 0xb7, 0x23, 0x3e, 0x85, 0xb0, 0xe4, 0xc6, | ||||
| 	0x08, 0x25, 0x35, 0x1b, 0x46, 0xa7, 0xb3, 0xd1, 0xcb, 0xf1, 0x7c, 0x7b, 0x6a, 0x7b, 0xc4, 0xb8, | ||||
| 	0xa6, 0xf1, 0x35, 0x84, 0x09, 0x2f, 0xcd, 0x46, 0x09, 0xcd, 0x80, 0xa4, 0x0f, 0x5a, 0xd2, 0xf9, | ||||
| 	0x7b, 0xcf, 0x2e, 0xa4, 0x51, 0x55, 0x5c, 0x8b, 0xf1, 0x03, 0x8c, 0x69, 0xbb, 0x5b, 0x9e, 0x8d, | ||||
| 	0xc8, 0x1d, 0xb5, 0xdd, 0xd7, 0x4d, 0x89, 0x8b, 0x68, 0xdb, 0x6c, 0x0e, 0x1d, 0xb0, 0xce, 0xb9, | ||||
| 	0x73, 0x2c, 0xe7, 0x47, 0x53, 0xe2, 0x73, 0x5a, 0x36, 0x7c, 0x07, 0x23, 0x21, 0xd3, 0x3a, 0x65, | ||||
| 	0x4c, 0x29, 0x8f, 0xdb, 0x29, 0x8b, 0x9d, 0xc0, 0x65, 0x34, 0x2d, 0xf8, 0x16, 0x40, 0x89, 0xb2, | ||||
| 	0xd0, 0x99, 0x29, 0x54, 0xc5, 0xee, 0x52, 0xc0, 0xa3, 0x76, 0x40, 0x5c, 0xf3, 0xce, 0xdf, 0x30, | ||||
| 	0x58, 0x7b, 0x26, 0x7f, 0x8a, 0xc4, 0x64, 0x85, 0xd4, 0xec, 0xfc, 0x98, 0xfd, 0x53, 0xcd, 0x7b, | ||||
| 	0xfb, 0xce, 0x80, 0x53, 0x08, 0xd3, 0x4c, 0xf3, 0x55, 0x2e, 0x52, 0x36, 0x89, 0x82, 0x59, 0x18, | ||||
| 	0xd7, 0x33, 0xbe, 0x80, 0x0b, 0x5e, 0x96, 0x79, 0xb5, 0x90, 0xe9, 0x77, 0x77, 0x71, 0x4b, 0xae, | ||||
| 	0x0d, 0xbb, 0x47, 0xb2, 0x63, 0x14, 0x3e, 0x83, 0x89, 0x2f, 0x43, 0x2c, 0xb4, 0x30, 0xd7, 0x5c, | ||||
| 	0x0b, 0x86, 0x24, 0x3f, 0xc0, 0xa7, 0x9f, 0x61, 0xdc, 0xfa, 0x2a, 0xb6, 0x6a, 0xbf, 0x44, 0xe5, | ||||
| 	0xfb, 0x6f, 0x97, 0xf8, 0x04, 0xfa, 0xbf, 0x79, 0xbe, 0x71, 0xd5, 0x3f, 0x68, 0x93, 0xe3, 0xae, | ||||
| 	0x4e, 0xde, 0x04, 0xd3, 0x6f, 0x80, 0x87, 0x57, 0xde, 0x31, 0xf0, 0xf0, 0xee, 0xbb, 0x04, 0x7e, | ||||
| 	0x81, 0xc9, 0x7e, 0x0d, 0xba, 0xc4, 0x2d, 0xe1, 0x7c, 0xaf, 0x14, 0x1d, 0xd3, 0xf6, 0x3a, 0xd2, | ||||
| 	0x21, 0xed, 0xf2, 0x6f, 0x00, 0x83, 0x65, 0xb6, 0x52, 0x5c, 0x55, 0x78, 0x05, 0xa1, 0x97, 0x69, | ||||
| 	0x16, 0xec, 0xbd, 0x0d, 0xaf, 0x99, 0x7f, 0xf4, 0x02, 0xff, 0xd4, 0xb7, 0x7a, 0x5b, 0x90, 0x16, | ||||
| 	0xd5, 0x61, 0x4f, 0xab, 0x33, 0xfa, 0xeb, 0xbe, 0xfa, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x2e, | ||||
| 	0xec, 0x55, 0x8c, 0x05, 0x00, 0x00, | ||||
| } | ||||
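One property of the generated code worth noting: every getter checks its receiver for nil, so the compiler can traverse sparsely populated rules without guarding each dereference. A small in-package sketch (names are illustrative):

    func exampleRule() {
    	root := &Rule{
    		ScopeName: "source.example",
    		Patterns:  []*Rule{{Match: `\bfoo\b`}},
    	}

    	var absent *Rule
    	_ = absent.GetScopeName() // returns "" even on a nil *Rule
    	for _, p := range root.GetPatterns() {
    		_ = p.GetMatch()
    	}
    }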
							
								
								
									
6  vendor/README.md  vendored
							| @@ -70,6 +70,7 @@ This is a list of grammars that Linguist selects to provide syntax highlighting | ||||
| - **ColdFusion CFC:** [SublimeText/ColdFusion](https://github.com/SublimeText/ColdFusion) | ||||
| - **COLLADA:** [textmate/xml.tmbundle](https://github.com/textmate/xml.tmbundle) | ||||
| - **Common Lisp:** [textmate/lisp.tmbundle](https://github.com/textmate/lisp.tmbundle) | ||||
| - **Common Workflow Language:** [manabuishii/language-cwl](https://github.com/manabuishii/language-cwl) | ||||
| - **Component Pascal:** [textmate/pascal.tmbundle](https://github.com/textmate/pascal.tmbundle) | ||||
| - **Cool:** [anunayk/cool-tmbundle](https://github.com/anunayk/cool-tmbundle) | ||||
| - **Coq:** [mkolosick/Sublime-Coq](https://github.com/mkolosick/Sublime-Coq) | ||||
| @@ -168,7 +169,7 @@ This is a list of grammars that Linguist selects to provide syntax highlighting | ||||
| - **Ioke:** [vic/ioke-outdated](https://github.com/vic/ioke-outdated) | ||||
| - **Isabelle:** [lsf37/Isabelle.tmbundle](https://github.com/lsf37/Isabelle.tmbundle) | ||||
| - **Isabelle ROOT:** [lsf37/Isabelle.tmbundle](https://github.com/lsf37/Isabelle.tmbundle) | ||||
| - **J:** [bcj/JSyntax](https://github.com/bcj/JSyntax) | ||||
| - **J:** [tikkanz/JSyntax](https://github.com/tikkanz/JSyntax) | ||||
| - **Jasmin:** [atmarksharp/jasmin-sublime](https://github.com/atmarksharp/jasmin-sublime) | ||||
| - **Java:** [textmate/java.tmbundle](https://github.com/textmate/java.tmbundle) | ||||
| - **Java Server Pages:** [textmate/java.tmbundle](https://github.com/textmate/java.tmbundle) | ||||
| @@ -277,6 +278,7 @@ This is a list of grammars that Linguist selects to provide syntax highlighting | ||||
| - **PLpgSQL:** [textmate/sql.tmbundle](https://github.com/textmate/sql.tmbundle) | ||||
| - **PogoScript:** [featurist/PogoScript.tmbundle](https://github.com/featurist/PogoScript.tmbundle) | ||||
| - **Pony:** [CausalityLtd/sublime-pony](https://github.com/CausalityLtd/sublime-pony) | ||||
| - **PostCSS:** [hudochenkov/Syntax-highlighting-for-PostCSS](https://github.com/hudochenkov/Syntax-highlighting-for-PostCSS) | ||||
| - **PostScript:** [textmate/postscript.tmbundle](https://github.com/textmate/postscript.tmbundle) | ||||
| - **POV-Ray SDL:** [c-lipka/language-povray](https://github.com/c-lipka/language-povray) | ||||
| - **PowerShell:** [SublimeText/PowerShell](https://github.com/SublimeText/PowerShell) | ||||
| @@ -349,6 +351,7 @@ This is a list of grammars that Linguist selects to provide syntax highlighting | ||||
| - **Stylus:** [billymoon/Stylus](https://github.com/billymoon/Stylus) | ||||
| - **Sublime Text Config:** [atom/language-javascript](https://github.com/atom/language-javascript) | ||||
| - **SubRip Text:** [314eter/atom-language-srt](https://github.com/314eter/atom-language-srt) | ||||
| - **SugarSS:** [hudochenkov/Syntax-highlighting-for-PostCSS](https://github.com/hudochenkov/Syntax-highlighting-for-PostCSS) | ||||
| - **SuperCollider:** [supercollider/language-supercollider](https://github.com/supercollider/language-supercollider) | ||||
| - **SVG:** [textmate/xml.tmbundle](https://github.com/textmate/xml.tmbundle) | ||||
| - **Swift:** [textmate/swift.tmbundle](https://github.com/textmate/swift.tmbundle) | ||||
| @@ -405,4 +408,5 @@ This is a list of grammars that Linguist selects to provide syntax highlighting | ||||
| - **Yacc:** [textmate/bison.tmbundle](https://github.com/textmate/bison.tmbundle) | ||||
| - **YAML:** [atom/language-yaml](https://github.com/atom/language-yaml) | ||||
| - **YANG:** [DzonyKalafut/language-yang](https://github.com/DzonyKalafut/language-yang) | ||||
| - **YARA:** [blacktop/language-yara](https://github.com/blacktop/language-yara) | ||||
| - **Zephir:** [phalcon/zephir-sublime](https://github.com/phalcon/zephir-sublime) | ||||
|   | ||||
							
								
								
									
Submodule vendor/grammars/JSyntax (vendored) updated: 74971149b5...4647952123
									
								
							
							
								
								
									
Submodule vendor/grammars/Stylus (vendored) updated: 61bab33f37...4b382d28fb
									
								
							
							
								
								
									
Submodule vendor/grammars/Syntax-highlighting-for-PostCSS (vendored) added at 575b918985
									
								
							
							
								
								
									
Submodule vendor/grammars/atom-language-perl6 (vendored) updated: 611c924d0f...382720261a
									
								
							
							
								
								
									
Submodule vendor/grammars/javadoc.tmbundle (vendored) added at 5276d7a93f
									
								
							
							
								
								
									
Submodule vendor/grammars/language-batchfile (vendored) updated: 6d2b18ef05...163e5ffc8d
									
								
							
							
								
								
									
Submodule vendor/grammars/language-cwl (vendored) added at 204ab237d3
									
								
							
							
								
								
									
Submodule vendor/grammars/language-roff (vendored) updated: d456f404b5...0b77518f17
									
								
							
							
								
								
									
Submodule vendor/grammars/language-yara (vendored) added at f08eec461a
									
								
							
							
								
								
									
Submodule vendor/grammars/sublime-aspectj (vendored) updated: 043444fc3f...c1928d2335
									
								
							
							
								
								
									
25  vendor/licenses/grammar/Syntax-highlighting-for-PostCSS.txt  vendored  Normal file
							| @@ -0,0 +1,25 @@ | ||||
| --- | ||||
| type: grammar | ||||
| name: Syntax-highlighting-for-PostCSS | ||||
| license: mit | ||||
| --- | ||||
| The MIT License (MIT) | ||||
|  | ||||
| Copyright 2017 Aleks Hudochenkov <aleks@hudochenkov.com> | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy of | ||||
| this software and associated documentation files (the "Software"), to deal in | ||||
| the Software without restriction, including without limitation the rights to | ||||
| use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of | ||||
| the Software, and to permit persons to whom the Software is furnished to do so, | ||||
| subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in all | ||||
| copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS | ||||
| FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR | ||||
| COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER | ||||
| IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
| CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||
| @@ -1,10 +1,9 @@ | ||||
| --- | ||||
| type: grammar | ||||
| name: ruby.tmbundle | ||||
| name: javadoc.tmbundle | ||||
| license: permissive | ||||
| curated: true | ||||
| --- | ||||
|  | ||||
| If not otherwise specified (see below), files in this repository fall under the following license: | ||||
|  | ||||
| 	Permission to copy, use, modify, sell and distribute this | ||||
							
								
								
									
26  vendor/licenses/grammar/language-cwl.txt  vendored  Normal file
							| @@ -0,0 +1,26 @@ | ||||
| --- | ||||
| type: grammar | ||||
| name: language-cwl | ||||
| license: mit | ||||
| --- | ||||
| MIT License | ||||
|  | ||||
| Copyright (c) 2017 Manabu Ishii RIKEN Bioinformatics Research Unit | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
| in the Software without restriction, including without limitation the rights | ||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in all | ||||
| copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| SOFTWARE. | ||||
							
								
								
									
26  vendor/licenses/grammar/language-yara.txt  vendored  Normal file
							| @@ -0,0 +1,26 @@ | ||||
| --- | ||||
| type: grammar | ||||
| name: language-yara | ||||
| license: mit | ||||
| --- | ||||
| The MIT License (MIT) | ||||
|  | ||||
| Copyright (c) 2014-2016 Blacktop | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
| in the Software without restriction, including without limitation the rights | ||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in all | ||||
| copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| SOFTWARE. | ||||