Mirror of https://github.com/KevinMidboe/linguist.git, synced 2025-10-29 17:50:22 +00:00
Merge branch 'master' into google-apps
Conflicts:
    lib/linguist/heuristics.rb
    lib/linguist/samples.json
@@ -4,4 +4,5 @@ require 'linguist/heuristics'
require 'linguist/language'
require 'linguist/repository'
require 'linguist/samples'
require 'linguist/shebang'
require 'linguist/version'

@@ -2,7 +2,6 @@ require 'linguist/generated'
require 'charlock_holmes'
require 'escape_utils'
require 'mime/types'
require 'pygments'
require 'yaml'

module Linguist

@@ -147,6 +146,13 @@ module Linguist
end
end

# Public: Is the blob empty?
#
# Return true or false
def empty?
data.nil? || data == ""
end

# Public: Is the blob text?
#
# Return true or false
@@ -193,10 +199,6 @@ module Linguist

# Public: Is the blob safe to colorize?
#
# We use Pygments for syntax highlighting blobs. Pygments
# can be too slow for very large blobs or for certain
# corner-case blobs.
#
# Return true or false
def safe_to_colorize?
!large? && text? && !high_ratio_of_long_lines?
@@ -204,9 +206,6 @@ module Linguist

# Internal: Does the blob have a ratio of long lines?
#
# These types of files are usually going to make Pygments.rb
# angry if we try to colorize them.
#
# Return true or false
def high_ratio_of_long_lines?
return false if loc == 0
@@ -314,23 +313,9 @@ module Linguist
@language ||= Language.detect(self)
end

# Internal: Get the lexer of the blob.
#
# Returns a Lexer.
def lexer
language ? language.lexer : Pygments::Lexer.find_by_name('Text only')
end

# Public: Highlight syntax of blob
#
# options - A Hash of options (defaults to {})
#
# Returns html String
def colorize(options = {})
return unless safe_to_colorize?
options[:options] ||= {}
options[:options][:encoding] ||= encoding
lexer.highlight(data, options)
# Internal: Get the TextMate compatible scope for the blob
def tm_scope
language && language.tm_scope
end
end
end

@@ -3,6 +3,25 @@ require 'linguist/tokenizer'
module Linguist
# Language bayesian classifier.
class Classifier
# Public: Use the classifier to detect language of the blob.
#
# blob - An object that quacks like a blob.
# possible_languages - Array of Language objects
#
# Examples
#
# Classifier.call(FileBlob.new("path/to/file"), [
# Language["Ruby"], Language["Python"]
# ])
#
# Returns an Array of Language objects, most probable first.
def self.call(blob, possible_languages)
language_names = possible_languages.map(&:name)
classify(Samples.cache, blob.data, language_names).map do |name, _|
Language[name] # Return the actual Language objects
end
end

# Public: Train classifier that data is a certain language.
#
# db - Hash classifier database object

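Classifier.call above is the classifier's half of the new strategy interface: it takes a blob plus the candidate Languages that earlier strategies could not narrow down, and returns those candidates reordered, most probable first. A minimal hedged sketch of calling it directly (the file path is illustrative):

    require 'linguist'

    # Any readable text file will do; the path is hypothetical.
    blob = Linguist::FileBlob.new("path/to/file.rb", Dir.pwd)
    candidates = [Linguist::Language["Ruby"], Linguist::Language["Python"]]

    # Candidate Language objects come back ordered by probability.
    Linguist::Classifier.call(blob, candidates).map(&:name)
    # e.g. ["Ruby", "Python"]
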
@@ -57,14 +57,20 @@ module Linguist
#
# Returns a String.
def extension
# File.extname returns nil if the filename is an extension.
extension = File.extname(name)
basename = File.basename(name)
# Checks if the filename is an extension.
if extension.empty? && basename[0] == "."
basename
else
extension
extensions.last || ""
end

# Public: Return an array of the file extensions
#
# >> Linguist::FileBlob.new("app/views/things/index.html.erb").extensions
# => [".html.erb", ".erb"]
#
# Returns an Array
def extensions
basename, *segments = File.basename(name).split(".")

segments.map.with_index do |segment, index|
"." + segments[index..-1].join(".")
end
end
end

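The reworked extensions helper builds every trailing multi-part extension by rejoining the remaining segments, and extension now simply takes the last of them. A quick sketch of the same logic on a bare filename, reproducing the example from the comment above:

    name = "app/views/things/index.html.erb"
    basename, *segments = File.basename(name).split(".")
    segments.map.with_index { |_, i| "." + segments[i..-1].join(".") }
    # => [".html.erb", ".erb"]  (so extension returns ".erb")
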
@@ -51,25 +51,25 @@ module Linguist
#
# Return true or false
def generated?
name == 'Gemfile.lock' ||
minified_files? ||
compiled_coffeescript? ||
xcode_file? ||
generated_parser? ||
generated_net_docfile? ||
generated_net_designer_file? ||
generated_postscript? ||
generated_protocol_buffer? ||
generated_jni_header? ||
composer_lock? ||
node_modules? ||
vcr_cassette? ||
generated_by_zephir?
minified_files? ||
compiled_coffeescript? ||
xcode_file? ||
generated_parser? ||
generated_net_docfile? ||
generated_net_designer_file? ||
generated_postscript? ||
generated_protocol_buffer? ||
generated_jni_header? ||
composer_lock? ||
node_modules? ||
godeps? ||
vcr_cassette? ||
generated_by_zephir?
end

# Internal: Is the blob an Xcode file?
#
# Generated if the file extension is an Xcode
# file extension.
#
# Returns true of false.
@@ -231,6 +231,14 @@ module Linguist
!!name.match(/node_modules\//)
end

# Internal: Is the blob part of Godeps/,
# which are not meant for humans in pull requests.
#
# Returns true or false.
def godeps?
!!name.match(/Godeps\//)
end

# Internal: Is the blob a generated php composer lock file?
#
# Returns true or false.
@@ -256,4 +264,3 @@ module Linguist
end
end
end

lib/linguist/grammars.rb (new file, 13 lines)
@@ -0,0 +1,13 @@
# Note: This file is included in the github-linguist-grammars gem, not the
# github-linguist gem.

module Linguist
module Grammars
# Get the path to the directory containing the language grammar JSON files.
#
# Returns a String.
def self.path
File.expand_path("../../../grammars", __FILE__)
end
end
end
@@ -1,99 +1,149 @@
module Linguist
# A collection of simple heuristics that can be used to better analyze languages.
class Heuristics
ACTIVE = false
# Public: Use heuristics to detect language of the blob.
#
# blob - An object that quacks like a blob.
# possible_languages - Array of Language objects
#
# Examples
#
# Heuristics.call(FileBlob.new("path/to/file"), [
# Language["Ruby"], Language["Python"]
# ])
#
# Returns an Array of languages, or empty if none matched or were inconclusive.
def self.call(blob, languages)
data = blob.data

# Public: Given an array of String language names,
# apply heuristics against the given data and return an array
# of matching languages, or nil.
@heuristics.each do |heuristic|
return Array(heuristic.call(data)) if heuristic.matches?(languages)
end

[] # No heuristics matched
end

# Internal: Define a new heuristic.
#
# data - Array of tokens or String data to analyze.
# languages - Array of language name Strings to restrict to.
# languages - String names of languages to disambiguate.
# heuristic - Block which takes data as an argument and returns a Language or nil.
#
# Returns an array of Languages or []
def self.find_by_heuristics(data, languages)
if active?
if languages.all? { |l| ["Objective-C", "C++"].include?(l) }
disambiguate_c(data, languages)
end
if languages.all? { |l| ["Perl", "Prolog"].include?(l) }
disambiguate_pl(data, languages)
end
if languages.all? { |l| ["ECL", "Prolog"].include?(l) }
disambiguate_ecl(data, languages)
end
if languages.all? { |l| ["TypeScript", "XML"].include?(l) }
disambiguate_ts(data, languages)
end
if languages.all? { |l| ["Common Lisp", "OpenCL"].include?(l) }
disambiguate_cl(data, languages)
end
if languages.all? { |l| ["Rebol", "R"].include?(l) }
disambiguate_r(data, languages)
end
if languages.all? { |l| ["Gosu", "JavaScript"].include?(l) }
disambiguate_gosu(data, languages)
end
# Examples
#
# disambiguate "Perl", "Prolog" do |data|
# if data.include?("use strict")
# Language["Perl"]
# elsif data.include?(":-")
# Language["Prolog"]
# end
# end
#
def self.disambiguate(*languages, &heuristic)
@heuristics << new(languages, &heuristic)
end

# Internal: Array of defined heuristics
@heuristics = []

# Internal
def initialize(languages, &heuristic)
@languages = languages
@heuristic = heuristic
end

# Internal: Check if this heuristic matches the candidate languages.
def matches?(candidates)
candidates.all? { |l| @languages.include?(l.name) }
end

# Internal: Perform the heuristic
def call(data)
@heuristic.call(data)
end

disambiguate "Objective-C", "C++", "C" do |data|
if (/@(interface|class|protocol|property|end|synchronised|selector|implementation)\b/.match(data))
Language["Objective-C"]
elsif (/^\s*#\s*include <(cstdint|string|vector|map|list|array|bitset|queue|stack|forward_list|unordered_map|unordered_set|(i|o|io)stream)>/.match(data) ||
/^\s*template\s*</.match(data) || /^[^@]class\s+\w+/.match(data) || /^[^@](private|public|protected):$/.match(data) || /std::.+$/.match(data))
Language["C++"]
end
end

# .h extensions are ambigious between C, C++, and Objective-C.
# We want to shortcut look for Objective-C _and_ now C++ too!
#
# Returns an array of Languages or []
def self.disambiguate_c(data, languages)
matches = []
matches << Language["Objective-C"] if data.include?("@interface")
matches << Language["C++"] if data.include?("#include <cstdint>")
matches
disambiguate "Perl", "Prolog" do |data|
if data.include?("use strict")
Language["Perl"]
elsif data.include?(":-")
Language["Prolog"]
end
end

def self.disambiguate_pl(data, languages)
matches = []
matches << Language["Prolog"] if data.include?(":-")
matches << Language["Perl"] if data.include?("use strict")
matches
disambiguate "ECL", "Prolog" do |data|
if data.include?(":-")
Language["Prolog"]
elsif data.include?(":=")
Language["ECL"]
end
end

def self.disambiguate_ecl(data, languages)
matches = []
matches << Language["Prolog"] if data.include?(":-")
matches << Language["ECL"] if data.include?(":=")
matches
end

def self.disambiguate_ts(data, languages)
matches = []
if (data.include?("</translation>"))
matches << Language["XML"]
disambiguate "IDL", "Prolog" do |data|
if data.include?(":-")
Language["Prolog"]
else
matches << Language["TypeScript"]
Language["IDL"]
end
matches
end

def self.disambiguate_cl(data, languages)
matches = []
matches << Language["Common Lisp"] if data.include?("(defun ")
matches << Language["OpenCL"] if /\/\* |\/\/ |^\}/.match(data)
matches
disambiguate "Common Lisp", "OpenCL", "Cool" do |data|
if data.include?("(defun ")
Language["Common Lisp"]
elsif /^class/x.match(data)
Language["Cool"]
elsif /\/\* |\/\/ |^\}/.match(data)
Language["OpenCL"]
end
end

def self.disambiguate_r(data, languages)
matches = []
matches << Language["Rebol"] if /\bRebol\b/i.match(data)
matches << Language["R"] if data.include?("<-")
matches
disambiguate "Hack", "PHP" do |data|
if data.include?("<?hh")
Language["Hack"]
elsif /<?[^h]/.match(data)
Language["PHP"]
end
end

def self.disambiguate_gosu(data, languages)
matches = []
matches << Language["Gosu"] if /^uses java\./.match(data)
matches
disambiguate "Scala", "SuperCollider" do |data|
if /\^(this|super)\./.match(data) || /^\s*(\+|\*)\s*\w+\s*{/.match(data) || /^\s*~\w+\s*=\./.match(data)
Language["SuperCollider"]
elsif /^\s*import (scala|java)\./.match(data) || /^\s*val\s+\w+\s*=/.match(data) || /^\s*class\b/.match(data)
Language["Scala"]
end
end

def self.active?
!!ACTIVE
disambiguate "AsciiDoc", "AGS Script" do |data|
Language["AsciiDoc"] if /^=+(\s|\n)/.match(data)
end

disambiguate "FORTRAN", "Forth" do |data|
if /^: /.match(data)
Language["Forth"]
elsif /^([c*][^a-z]| subroutine\s)/i.match(data)
Language["FORTRAN"]
end
end

disambiguate "F#", "Forth", "GLSL" do |data|
if /^(: |new-device)/.match(data)
Language["Forth"]
elsif /^(#light|import|let|module|namespace|open|type)/.match(data)
Language["F#"]
elsif /^(#include|#pragma|precision|uniform|varying|void)/.match(data)
Language["GLSL"]
end
end

disambiguate "Gosu", "JavaScript" do |data|
Language["Gosu"] if /^uses java\./.match(data)
end
end
end

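The replacement Heuristics class registers each rule with the disambiguate DSL shown above and only runs a rule when every candidate language is one the rule knows how to tell apart. A minimal hedged sketch of exercising the Perl/Prolog rule directly, using a small stand-in object for the blob (only #data is read):

    require 'ostruct'
    require 'linguist'

    blob = OpenStruct.new(:data => "use strict;\nprint 'hi';\n")
    candidates = [Linguist::Language["Perl"], Linguist::Language["Prolog"]]

    Linguist::Heuristics.call(blob, candidates)
    # => [Linguist::Language["Perl"]]  (wrapped in an Array by Heuristics.call)
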
@@ -1,8 +1,7 @@
require 'escape_utils'
require 'pygments'
require 'yaml'
begin
require 'json'
require 'yajl'
rescue LoadError
end

@@ -11,6 +10,8 @@ require 'linguist/heuristics'
require 'linguist/samples'
require 'linguist/file_blob'
require 'linguist/blob_helper'
require 'linguist/strategy/filename'
require 'linguist/shebang'

module Linguist
# Language names that are recognizable by GitHub. Defined languages
@@ -62,7 +63,7 @@ module Linguist
end

# Language name index
@index[language.name] = @name_index[language.name] = language
@index[language.name.downcase] = @name_index[language.name.downcase] = language

language.aliases.each do |name|
# All Language aliases should be unique. Raise if there is a duplicate.
@@ -70,7 +71,7 @@ module Linguist
raise ArgumentError, "Duplicate alias: #{name}"
end

@index[name] = @alias_index[name] = language
@index[name.downcase] = @alias_index[name.downcase] = language
end

language.extensions.each do |extension|
@@ -92,6 +93,13 @@ module Linguist
language
end

STRATEGIES = [
Linguist::Strategy::Filename,
Linguist::Shebang,
Linguist::Heuristics,
Linguist::Classifier
]

# Public: Detects the Language of the blob.
#
# blob - an object that includes the Linguist `BlobHelper` interface;
@@ -99,51 +107,22 @@ module Linguist
#
# Returns Language or nil.
def self.detect(blob)
name = blob.name.to_s
# Bail early if the blob is binary or empty.
return nil if blob.likely_binary? || blob.binary? || blob.empty?

# Check if the blob is possibly binary and bail early; this is a cheap
# test that uses the extension name to guess a binary binary mime type.
#
# We'll perform a more comprehensive test later which actually involves
# looking for binary characters in the blob
return nil if blob.likely_binary? || blob.binary?

# A bit of an elegant hack. If the file is executable but extensionless,
# append a "magic" extension so it can be classified with other
# languages that have shebang scripts.
extension = FileBlob.new(name).extension
if extension.empty? && blob.mode && (blob.mode.to_i(8) & 05) == 05
name += ".script!"
end

# First try to find languages that match based on filename.
possible_languages = find_by_filename(name)

# If there is more than one possible language with that extension (or no
# extension at all, in the case of extensionless scripts), we need to continue
# our detection work
if possible_languages.length > 1
data = blob.data
possible_language_names = possible_languages.map(&:name)

# Don't bother with binary contents or an empty file
if data.nil? || data == ""
nil
# Check if there's a shebang line and use that as authoritative
elsif (result = find_by_shebang(data)) && !result.empty?
result.first
# No shebang. Still more work to do. Try to find it with our heuristics.
elsif (determined = Heuristics.find_by_heuristics(data, possible_language_names)) && !determined.empty?
determined.first
# Lastly, fall back to the probablistic classifier.
elsif classified = Classifier.classify(Samples::DATA, data, possible_language_names ).first
# Return the actual Language object based of the string language name (i.e., first element of `#classify`)
Language[classified[0]]
# Call each strategy until one candidate is returned.
STRATEGIES.reduce([]) do |languages, strategy|
candidates = strategy.call(blob, languages)
if candidates.size == 1
return candidates.first
elsif candidates.size > 1
# More than one candidate was found, pass them to the next strategy.
candidates
else
# No candiates were found, pass on languages from the previous strategy.
languages
end
else
# Simplest and most common case, we can just return the one match based on extension
possible_languages.first
end
end.first
end

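With this change Language.detect no longer hard-codes the shebang/heuristics/classifier cascade; it folds the blob through STRATEGIES, stopping as soon as a strategy returns exactly one candidate and otherwise handing the surviving candidates to the next strategy. A hedged sketch of the same reduce pattern in isolation, with toy lambdas standing in for the strategy classes (names are hypothetical):

    # Each strategy responds to call(blob, candidates) and returns an Array.
    by_extension = ->(blob, _)     { ["Ruby", "Python"] }  # ambiguous
    by_shebang   = ->(blob, cands) { cands }               # inconclusive, pass through
    classifier   = ->(blob, cands) { [cands.first] }       # picks one

    strategies = [by_extension, by_shebang, classifier]

    def detect(strategies, blob)
      strategies.reduce([]) do |candidates, strategy|
        result = strategy.call(blob, candidates)
        return result.first if result.size == 1
        result.size > 1 ? result : candidates
      end.first
    end

    detect(strategies, Object.new)  # => "Ruby"
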
# Public: Get all Languages
@@ -164,7 +143,7 @@ module Linguist
#
# Returns the Language or nil if none was found.
def self.find_by_name(name)
@name_index[name]
name && @name_index[name.downcase]
end

# Public: Look up Language by one of its aliases.
@@ -178,7 +157,7 @@ module Linguist
#
# Returns the Lexer or nil if none was found.
def self.find_by_alias(name)
@alias_index[name]
name && @alias_index[name.downcase]
end

# Public: Look up Languages by filename.
@@ -193,26 +172,53 @@ module Linguist
# Returns all matching Languages or [] if none were found.
def self.find_by_filename(filename)
basename = File.basename(filename)
extname = FileBlob.new(filename).extension
langs = @filename_index[basename] +
@extension_index[extname]
langs.compact.uniq

# find the first extension with language definitions
extname = FileBlob.new(filename).extensions.detect do |e|
!@extension_index[e].empty?
end

(@filename_index[basename] + @extension_index[extname]).compact.uniq
end

# Public: Look up Languages by shebang line.
# Public: Look up Languages by file extension.
#
# data - Array of tokens or String data to analyze.
# extname - The extension String.
#
# Examples
#
# Language.find_by_shebang("#!/bin/bash\ndate;")
# Language.find_by_extension('.rb')
# # => [#<Language name="Ruby">]
#
# Language.find_by_extension('rb')
# # => [#<Language name="Ruby">]
#
# Returns all matching Languages or [] if none were found.
def self.find_by_extension(extname)
extname = ".#{extname}" unless extname.start_with?(".")
@extension_index[extname]
end

# DEPRECATED
def self.find_by_shebang(data)
@interpreter_index[Shebang.interpreter(data)]
end

# Public: Look up Languages by interpreter.
#
# interpreter - String of interpreter name
#
# Examples
#
# Language.find_by_interpreter("bash")
# # => [#<Language name="Bash">]
#
# Returns the matching Language
def self.find_by_shebang(data)
@interpreter_index[Linguist.interpreter_from_shebang(data)]
def self.find_by_interpreter(interpreter)
@interpreter_index[interpreter]
end


# Public: Look up Language by its name or lexer.
#
# name - The String name of the Language
@@ -227,7 +233,7 @@ module Linguist
#
# Returns the Language or nil if none was found.
def self.[](name)
@index[name]
name && @index[name.downcase]
end

# Public: A List of popular languages
@@ -286,9 +292,16 @@ module Linguist
# Set aliases
@aliases = [default_alias_name] + (attributes[:aliases] || [])

# Lookup Lexer object
@lexer = Pygments::Lexer.find_by_name(attributes[:lexer] || name) ||
raise(ArgumentError, "#{@name} is missing lexer")
# Load the TextMate scope name or try to guess one
@tm_scope = attributes[:tm_scope] || begin
context = case @type
when :data, :markup, :prose
'text'
when :programming, nil
'source'
end
"#{context}.#{@name.downcase}"
end

@ace_mode = attributes[:ace_mode]
@wrap = attributes[:wrap] || false
@@ -363,6 +376,11 @@ module Linguist
# Returns the Lexer
attr_reader :lexer

# Public: Get the name of a TextMate-compatible scope
#
# Returns the scope
attr_reader :tm_scope

# Public: Get Ace mode
#
# Examples
@@ -406,11 +424,6 @@ module Linguist
# Returns the extensions Array
attr_reader :filenames

# Public: Return all possible extensions for language
def all_extensions
(extensions + [primary_extension]).uniq
end

# Deprecated: Get primary extension
#
# Defaults to the first extension but can be overridden
@@ -510,16 +523,16 @@ module Linguist
end
end

extensions = Samples::DATA['extnames']
interpreters = Samples::DATA['interpreters']
filenames = Samples::DATA['filenames']
extensions = Samples.cache['extnames']
interpreters = Samples.cache['interpreters']
filenames = Samples.cache['filenames']
popular = YAML.load_file(File.expand_path("../popular.yml", __FILE__))

languages_yml = File.expand_path("../languages.yml", __FILE__)
languages_json = File.expand_path("../languages.json", __FILE__)

if File.exist?(languages_json) && defined?(JSON)
languages = JSON.load(File.read(languages_json))
if File.exist?(languages_json) && defined?(Yajl)
languages = Yajl.load(File.read(languages_json))
else
languages = YAML.load_file(languages_yml)
end
@@ -564,12 +577,13 @@ module Linguist
:type => options['type'],
:aliases => options['aliases'],
:lexer => options['lexer'],
:tm_scope => options['tm_scope'],
:ace_mode => options['ace_mode'],
:wrap => options['wrap'],
:group_name => options['group'],
:searchable => options.key?('searchable') ? options['searchable'] : true,
:searchable => options.fetch('searchable', true),
:search_term => options['search_term'],
:extensions => [options['extensions'].first] + options['extensions'][1..-1].sort,
:extensions => Array(options['extensions']),
:interpreters => options['interpreters'].sort,
:filenames => options['filenames'],
:popular => popular.include?(name)

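Taken together, the lookup changes above make every index lookup case-insensitive and split the old find_by_shebang into find_by_extension plus find_by_interpreter. A few hedged one-liners showing the new entry points (results follow from the indexes built above):

    Linguist::Language.find_by_extension('rb')      # leading dot is optional, e.g. [Language["Ruby"]]
    Linguist::Language.find_by_interpreter('bash')  # interpreter-index lookup
    Linguist::Language['objective-c']               # Language[] now downcases its key
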
File diff suppressed because it is too large
@@ -1,8 +1,13 @@
require 'linguist/blob_helper'
require 'linguist/language'
require 'rugged'

module Linguist
class LazyBlob
GIT_ATTR = ['linguist-language', 'linguist-vendored']
GIT_ATTR_OPTS = { :priority => [:index], :skip_system => true }
GIT_ATTR_FLAGS = Rugged::Repository::Attributes.parse_opts(GIT_ATTR_OPTS)

include BlobHelper

MAX_SIZE = 128 * 1024
@@ -19,6 +24,29 @@ module Linguist
@mode = mode
end

def git_attributes
@git_attributes ||= repository.fetch_attributes(
name, GIT_ATTR, GIT_ATTR_FLAGS)
end

def vendored?
if attr = git_attributes['linguist-vendored']
return boolean_attribute(attr)
else
return super
end
end

def language
return @language if defined?(@language)

@language = if lang = git_attributes['linguist-language']
Language.find_by_name(lang)
else
super
end
end

def data
load_blob!
@data
@@ -30,6 +58,12 @@ module Linguist
end

protected

# Returns true if the attribute is present and not the string "false".
def boolean_attribute(attr)
attr != "false"
end

def load_blob!
@data, @size = Rugged::Blob.to_buffer(repository, oid, MAX_SIZE) if @data.nil?
end

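The LazyBlob additions read per-path git attributes so a repository can override detection with linguist-language and force or clear vendoring with linguist-vendored. A hedged sketch of wiring one up against a Rugged repository; the path is illustrative and the LazyBlob.new argument order (repository, oid, path, mode) is assumed from the initializer fragment above:

    require 'rugged'
    require 'linguist'

    repo  = Rugged::Repository.new(".")
    entry = repo.last_commit.tree.path("lib/foo.rb")  # { :oid => ..., :filemode => ..., ... }

    blob = Linguist::LazyBlob.new(repo, entry[:oid], "lib/foo.rb", entry[:filemode])
    blob.language   # honours "lib/foo.rb linguist-language=Java" in .gitattributes, if set
    blob.vendored?  # honours "linguist-vendored" / "linguist-vendored=false"
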
@@ -110,18 +110,37 @@ module Linguist
if @old_commit_oid == @commit_oid
@old_stats
else
compute_stats(@old_commit_oid, @commit_oid, @old_stats)
compute_stats(@old_commit_oid, @old_stats)
end
end
end

protected
def compute_stats(old_commit_oid, commit_oid, cache = nil)
file_map = cache ? cache.dup : {}
old_tree = old_commit_oid && Rugged::Commit.lookup(repository, old_commit_oid).tree
new_tree = Rugged::Commit.lookup(repository, commit_oid).tree
def read_index
attr_index = Rugged::Index.new
attr_index.read_tree(current_tree)
repository.index = attr_index
end

diff = Rugged::Tree.diff(repository, old_tree, new_tree)
def current_tree
@tree ||= Rugged::Commit.lookup(repository, @commit_oid).tree
end

protected

def compute_stats(old_commit_oid, cache = nil)
old_tree = old_commit_oid && Rugged::Commit.lookup(repository, old_commit_oid).tree

read_index

diff = Rugged::Tree.diff(repository, old_tree, current_tree)

# Clear file map and fetch full diff if any .gitattributes files are changed
if cache && diff.each_delta.any? { |delta| File.basename(delta.new_file[:path]) == ".gitattributes" }
diff = Rugged::Tree.diff(repository, old_tree = nil, current_tree)
file_map = {}
else
file_map = cache ? cache.dup : {}
end

diff.each_delta do |delta|
old = delta.old_file[:path]

File diff suppressed because it is too large
@@ -1,11 +1,12 @@
begin
require 'json'
require 'yajl'
rescue LoadError
require 'yaml'
end

require 'linguist/md5'
require 'linguist/classifier'
require 'linguist/shebang'

module Linguist
# Model for accessing classifier training data.
@@ -17,9 +18,11 @@ module Linguist
PATH = File.expand_path('../samples.json', __FILE__)

# Hash of serialized samples object
if File.exist?(PATH)
serializer = defined?(JSON) ? JSON : YAML
DATA = serializer.load(File.read(PATH))
def self.cache
@cache ||= begin
serializer = defined?(Yajl) ? Yajl : YAML
serializer.load(File.read(PATH))
end
end

# Public: Iterate over each sample.
@@ -50,14 +53,16 @@ module Linguist
})
end
else
path = File.join(dirname, filename)

if File.extname(filename) == ""
raise "#{File.join(dirname, filename)} is missing an extension, maybe it belongs in filenames/ subdir"
raise "#{path} is missing an extension, maybe it belongs in filenames/ subdir"
end

yield({
:path => File.join(dirname, filename),
:path => path,
:language => category,
:interpreter => File.exist?(filename) ? Linguist.interpreter_from_shebang(File.read(filename)) : nil,
:interpreter => Shebang.interpreter(File.read(path)),
:extname => File.extname(filename)
})
end
@@ -110,40 +115,4 @@ module Linguist
db
end
end

# Used to retrieve the interpreter from the shebang line of a file's
# data.
def self.interpreter_from_shebang(data)
lines = data.lines.to_a

if lines.any? && (match = lines[0].match(/(.+)\n?/)) && (bang = match[0]) =~ /^#!/
bang.sub!(/^#! /, '#!')
tokens = bang.split(' ')
pieces = tokens.first.split('/')

if pieces.size > 1
script = pieces.last
else
script = pieces.first.sub('#!', '')
end

script = script == 'env' ? tokens[1] : script

# "python2.6" -> "python"
if script =~ /((?:\d+\.?)+)/
script.sub! $1, ''
end

# Check for multiline shebang hacks that call `exec`
if script == 'sh' &&
lines[0...5].any? { |l| l.match(/exec (\w+).+\$0.+\$@/) }
script = $1
end

script
else
nil
end
end

end

lib/linguist/shebang.rb (new file, 44 lines)
@@ -0,0 +1,44 @@
module Linguist
class Shebang
# Public: Use shebang to detect language of the blob.
#
# blob - An object that quacks like a blob.
#
# Examples
#
# Shebang.call(FileBlob.new("path/to/file"))
#
# Returns an Array with one Language if the blob has a shebang with a valid
# interpreter, or empty if there is no shebang.
def self.call(blob, _ = nil)
Language.find_by_interpreter interpreter(blob.data)
end

# Public: Get the interpreter from the shebang
#
# Returns a String or nil
def self.interpreter(data)
lines = data.lines
return unless match = /^#! ?(.*)$/.match(lines.first)

tokens = match[1].split(' ')
script = tokens.first.split('/').last

script = tokens[1] if script == 'env'

# If script has an invalid shebang, we might get here
return unless script

# "python2.6" -> "python2"
script.sub! $1, '' if script =~ /(\.\d+)$/

# Check for multiline shebang hacks that call `exec`
if script == 'sh' &&
lines.first(5).any? { |l| l.match(/exec (\w+).+\$0.+\$@/) }
script = $1
end

File.basename(script)
end
end
end

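A few hedged examples of what the new Shebang.interpreter returns for typical first lines; the expected values follow directly from the code above:

    require 'linguist/shebang'

    Linguist::Shebang.interpreter("#!/usr/bin/env python2.7\nprint 1\n")  # => "python2" (trailing ".7" stripped)
    Linguist::Shebang.interpreter("#! /bin/bash\necho hi\n")              # => "bash"
    Linguist::Shebang.interpreter("no shebang here\n")                    # => nil
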
lib/linguist/strategy/filename.rb (new file, 20 lines)
@@ -0,0 +1,20 @@
module Linguist
module Strategy
# Detects language based on filename and/or extension
class Filename
def self.call(blob, _)
name = blob.name.to_s

# A bit of an elegant hack. If the file is executable but extensionless,
# append a "magic" extension so it can be classified with other
# languages that have shebang scripts.
extensions = FileBlob.new(name).extensions
if extensions.empty? && blob.mode && (blob.mode.to_i(8) & 05) == 05
name += ".script!"
end

Language.find_by_filename(name)
end
end
end
end

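Strategy::Filename is the first entry in the STRATEGIES chain: it only looks at the blob's name (plus the executable-bit hack above) and maps it to candidate languages. A minimal hedged sketch (the filename is illustrative):

    require 'linguist'

    blob = Linguist::FileBlob.new("setup.py", Dir.pwd)
    Linguist::Strategy::Filename.call(blob, [])  # e.g. [Linguist::Language["Python"]]
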
@@ -33,15 +33,19 @@
# Erlang bundles
- ^rebar$

# Bootstrap minified css and js
- (^|/)bootstrap([^.]*)(\.min)?\.(js|css)$
# Go dependencies
- Godeps/_workspace/

# Minified JavaScript and CSS
- (\.|-)min\.(js|css)$

# Bootstrap css and js
- (^|/)bootstrap([^.]*)\.(js|css)$

# Font Awesome
- font-awesome.min.css
- font-awesome.css

# Foundation css
- foundation.min.css
- foundation.css

# Normalize.css
@@ -53,7 +57,6 @@

# Animate.css
- animate.css
- animate.min.css

# Vendored dependencies
- third[-_]?party/
@@ -70,12 +73,12 @@
## Commonly Bundled JavaScript frameworks ##

# jQuery
- (^|/)jquery([^.]*)(\.min)?\.js$
- (^|/)jquery\-\d\.\d+(\.\d+)?(\.min)?\.js$
- (^|/)jquery([^.]*)\.js$
- (^|/)jquery\-\d\.\d+(\.\d+)?\.js$

# jQuery UI
- (^|/)jquery\-ui(\-\d\.\d+(\.\d+)?)?(\.\w+)?(\.min)?\.(js|css)$
- (^|/)jquery\.(ui|effects)\.([^.]*)(\.min)?\.(js|css)$
- (^|/)jquery\-ui(\-\d\.\d+(\.\d+)?)?(\.\w+)?\.(js|css)$
- (^|/)jquery\.(ui|effects)\.([^.]*)\.(js|css)$

# Prototype
- (^|/)prototype(.*)\.js$
@@ -107,27 +110,32 @@
# MathJax
- (^|/)MathJax/

# Chart.js
- (^|/)Chart\.js$

# Codemirror
- (^|/)[Cc]ode[Mm]irror/(lib|mode|theme|addon|keymap)

# SyntaxHighlighter - http://alexgorbatchev.com/
- (^|/)shBrush([^.]*)\.js$
- (^|/)shCore\.js$
- (^|/)shLegacy\.js$

# AngularJS
- (^|/)angular([^.]*)(\.min)?\.js$
- (^|/)angular([^.]*)\.js$

# D3.js
- (^|\/)d3(\.v\d+)?([^.]*)(\.min)?\.js$
- (^|\/)d3(\.v\d+)?([^.]*)\.js$

# React
- (^|/)react(-[^.]*)?(\.min)?\.js$
- (^|/)react(-[^.]*)?\.js$

# Modernizr
- (^|/)modernizr\-\d\.\d+(\.\d+)?(\.min)?\.js$
- (^|/)modernizr\-\d\.\d+(\.\d+)?\.js$
- (^|/)modernizr\.custom\.\d+\.js$

# Knockout
- (^|/)knockout-(\d+\.){3}(debug\.)?js$
- knockout-min.js

## Python ##

@@ -165,8 +173,8 @@
- \.intellisense\.js$

# jQuery validation plugin (MS bundles this with asp.net mvc)
- (^|/)jquery([^.]*)\.validate(\.unobtrusive)?(\.min)?\.js$
- (^|/)jquery([^.]*)\.unobtrusive\-ajax(\.min)?\.js$
- (^|/)jquery([^.]*)\.validate(\.unobtrusive)?\.js$
- (^|/)jquery([^.]*)\.unobtrusive\-ajax\.js$

# Microsoft Ajax
- (^|/)[Mm]icrosoft([Mm]vc)?([Aa]jax|[Vv]alidation)(\.debug)?\.js$
@@ -193,7 +201,7 @@
- (^|/)extjs/welcome/

# Html5shiv
- (^|/)html5shiv(\.min)?\.js$
- (^|/)html5shiv\.js$

# Samples folders
- ^[Ss]amples/
@@ -212,8 +220,8 @@
- ^[Tt]est/fixtures/

# PhoneGap/Cordova
- (^|/)cordova([^.]*)(\.min)?\.js$
- (^|/)cordova\-\d\.\d(\.\d)?(\.min)?\.js$
- (^|/)cordova([^.]*)\.js$
- (^|/)cordova\-\d\.\d(\.\d)?\.js$

# Foundation js
- foundation(\..*)?\.js$
@@ -224,14 +232,18 @@
# .DS_Store's
- .[Dd][Ss]_[Ss]tore$

# Mercury --use-subdirs
- Mercury/

# R packages
- ^vignettes/
- ^inst/extdata/

# Octicons
- octicons.css
- octicons.min.css
- sprockets-octicons.scss

# Typesafe Activator
- (^|/)activator$
- (^|/)activator\.bat$

# ProGuard
- proguard.pro
- proguard-rules.pro

@@ -1,3 +1,3 @@
module Linguist
VERSION = "3.1.1"
VERSION = "4.0.3"
end