Merge branch 'master' into 1320-local

Conflicts:
	lib/linguist/samples.json
Arfon Smith
2014-07-02 13:27:34 -05:00
21 changed files with 67737 additions and 66755 deletions


@@ -313,15 +313,7 @@ module Linguist
#
# Returns a Language or nil if none is detected
def language
return @language if defined? @language
if defined?(@data) && @data.is_a?(String)
data = @data
else
data = lambda { (binary_mime_type? || binary?) ? "" : self.data }
end
@language = Language.detect(name.to_s, data, mode)
@language ||= Language.detect(self)
end
# Internal: Get the lexer of the blob.
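The blob's #language method now simply hands the blob itself to the detector instead of juggling name, data and mode. A minimal usage sketch with Linguist::FileBlob (the path is illustrative):

require 'linguist'

blob = Linguist::FileBlob.new('lib/linguist/language.rb')  # FileBlob implements the blob interface
blob.language        # delegates to Language.detect(blob); returns a Language or nil
blob.language.name   # => "Ruby"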


@@ -92,18 +92,17 @@ module Linguist
# Public: Detects the Language of the blob.
#
# name - String filename
# data - String blob data. A block may also be passed in for lazy
# loading. This behavior is deprecated and you should always
# pass in a String.
# mode - Optional String mode (defaults to nil)
# blob - an object that implements the Linguist `Blob` interface;
# see Linguist::LazyBlob and Linguist::FileBlob for examples
#
# Returns Language or nil.
def self.detect(name, data, mode = nil)
def self.detect(blob)
name = blob.name.to_s
# A bit of an elegant hack. If the file is executable but extensionless,
# append a "magic" extension so it can be classified with other
# languages that have shebang scripts.
if File.extname(name).empty? && mode && (mode.to_i(8) & 05) == 05
if File.extname(name).empty? && blob.mode && (blob.mode.to_i(8) & 05) == 05
name += ".script!"
end
@@ -114,7 +113,7 @@ module Linguist
# extension at all, in the case of extensionless scripts), we need to continue
# our detection work
if possible_languages.length > 1
data = data.call() if data.respond_to?(:call)
data = blob.data
possible_language_names = possible_languages.map(&:name)
# Don't bother with emptiness
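Language.detect now accepts any object that implements the blob interface. A hedged sketch using a hypothetical stand-in, assuming only #name, #data and #mode are needed here (as in the hunk above):

require 'linguist'

# StubBlob is a hypothetical stand-in for Linguist::FileBlob / Linguist::LazyBlob.
StubBlob = Struct.new(:name, :data, :mode)

blob = StubBlob.new('hello.rb', "puts 'hi'\n", '100644')
Linguist::Language.detect(blob)  # the .rb extension should resolve this; #data is only read on ambiguity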


@@ -89,7 +89,7 @@ Agda:
Alloy:
type: programming # 'modeling' would be more appropriate
lexer: Text only
lexer: Alloy
color: "#cc5c24"
extensions:
- .als
@@ -222,6 +222,8 @@ BlitzBasic:
- .decls
BlitzMax:
type: programming
color: "#cd6400"
extensions:
- .bmx
@@ -735,12 +737,14 @@ GLSL:
- .glsl
- .fp
- .frag
- .frg
- .fshader
- .geom
- .glslv
- .gshader
- .shader
- .vert
- .vrx
- .vshader
Genshi:
@@ -798,6 +802,12 @@ Gosu:
extensions:
- .gs
Grace:
type: programming
lexer: Text only
extensions:
- .grace
Grammatical Framework:
type: programming
lexer: Haskell
@@ -1397,7 +1407,6 @@ Nimrod:
Nit:
type: programming
#lexer: Nit
lexer: Text only
color: "#0d8921"
extensions:
@@ -1544,6 +1553,8 @@ PHP:
- .phpt
filenames:
- Phakefile
interpreters:
- php
Pan:
type: programming
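These languages.yml attributes surface directly on the corresponding Language objects, e.g. (values taken from the entries above; output shapes are indicative):

require 'linguist'

Linguist::Language['BlitzMax'].color    # => "#cd6400"
Linguist::Language['Grace'].extensions  # e.g. [".grace"]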

lib/linguist/lazy_blob.rb (new file, 37 lines)

@@ -0,0 +1,37 @@
require 'linguist/blob_helper'
require 'rugged'

module Linguist
  class LazyBlob
    include BlobHelper

    MAX_SIZE = 128 * 1024

    attr_reader :repository
    attr_reader :oid
    attr_reader :name
    attr_reader :mode

    def initialize(repo, oid, name, mode = nil)
      @repository = repo
      @oid = oid
      @name = name
      @mode = mode
    end

    def data
      load_blob!
      @data
    end

    def size
      load_blob!
      @size
    end

    protected

    def load_blob!
      @data, @size = Rugged::Blob.to_buffer(repository, oid, MAX_SIZE) if @data.nil?
    end
  end
end
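A rough usage sketch (the repository path and blob oid are placeholders): the blob's content is only pulled from the object database, capped at MAX_SIZE, when #data or #size is first needed.

require 'rugged'
require 'linguist'

rugged = Rugged::Repository.new('/path/to/project')
oid    = 'a1b2c3d4'  # placeholder blob SHA1

blob = Linguist::LazyBlob.new(rugged, oid, 'app/models/user.rb', '100644')
blob.language  # may not read the ODB at all if the extension is unambiguous
blob.size      # triggers load_blob! on first use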


@@ -1,4 +1,5 @@
require 'linguist/file_blob'
require 'linguist/lazy_blob'
require 'rugged'
module Linguist
# A Repository is an abstraction of a Grit::Repo or a basic file
@@ -7,100 +8,146 @@ module Linguist
# Its primary purpose is for gathering language statistics across
# the entire project.
class Repository
# Public: Initialize a new Repository from a File directory
#
# base_path - A path String
#
# Returns a Repository
def self.from_directory(base_path)
new Dir["#{base_path}/**/*"].
select { |f| File.file?(f) }.
map { |path| FileBlob.new(path, base_path) }
attr_reader :repository
# Public: Create a new Repository based on the stats of
# an existing one
def self.incremental(repo, commit_oid, old_commit_oid, old_stats)
repo = self.new(repo, commit_oid)
repo.load_existing_stats(old_commit_oid, old_stats)
repo
end
# Public: Initialize a new Repository
# Public: Initialize a new Repository to be analyzed for language
# data
#
# enum - Enumerator that responds to `each` and
# yields Blob objects
# repo - a Rugged::Repository object
# commit_oid - the sha1 of the commit that will be analyzed;
# this is usually the master branch
#
# Returns a Repository
def initialize(enum)
@enum = enum
@computed_stats = false
@language = @size = nil
@sizes = Hash.new { 0 }
@file_breakdown = Hash.new { |h,k| h[k] = Array.new }
def initialize(repo, commit_oid)
@repository = repo
@commit_oid = commit_oid
raise TypeError, 'commit_oid must be a commit SHA1' unless commit_oid.is_a?(String)
end
# Public: Load the results of a previous analysis on this repository
# to speed up the new scan.
#
# The new analysis will be performed incrementally, so as to take into
# account only the file changes made since the last time the repository
# was scanned
#
# old_commit_oid - the sha1 of the commit that was previously analyzed
# old_stats - the result of the previous analysis, obtained by calling
# Repository#cache on the old repository
#
# Returns nothing
def load_existing_stats(old_commit_oid, old_stats)
@old_commit_oid = old_commit_oid
@old_stats = old_stats
nil
end
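A hedged end-to-end sketch of the incremental flow (path and revisions are illustrative; Rugged's rev_parse_oid is assumed here for resolving them to SHA1 strings):

require 'rugged'
require 'linguist'

rugged  = Rugged::Repository.new('/path/to/project')
old_oid = rugged.rev_parse_oid('HEAD~20')
new_oid = rugged.rev_parse_oid('HEAD')

full  = Linguist::Repository.new(rugged, old_oid)
stats = full.cache  # full scan; keep this result around

incr = Linguist::Repository.incremental(rugged, new_oid, old_oid, stats)
incr.languages      # only the trees of the two commits are diffed this time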
# Public: Returns a breakdown of language stats.
#
# Examples
#
# # => { Language['Ruby'] => 46319,
# Language['JavaScript'] => 258 }
# # => { 'Ruby' => 46319,
# 'JavaScript' => 258 }
#
# Returns a Hash of Language keys and Integer size values.
# Returns a Hash of language names and Integer size values.
def languages
compute_stats
@sizes
@sizes ||= begin
sizes = Hash.new { 0 }
cache.each do |_, (language, size)|
sizes[language] += size
end
sizes
end
end
# Public: Get primary Language of repository.
#
# Returns a Language
# Returns a language name
def language
compute_stats
@language
@language ||= begin
primary = languages.max_by { |(_, size)| size }
primary && primary[0]
end
end
# Public: Get the total size of the repository.
#
# Returns a byte size Integer
def size
compute_stats
@size
@size ||= languages.inject(0) { |s,(_,v)| s + v }
end
# Public: Return the language breakdown of this repository by file
#
# Returns a map of language names => [filenames...]
def breakdown_by_file
compute_stats
@file_breakdown
@file_breakdown ||= begin
breakdown = Hash.new { |h,k| h[k] = Array.new }
cache.each do |filename, (language, _)|
breakdown[language] << filename
end
breakdown
end
end
# Internal: Compute language breakdown for each blob in the Repository.
# Public: Return the cached results of the analysis
#
# Returns nothing
def compute_stats
return if @computed_stats
# This is a per-file breakdown that can be passed to other instances
# of Linguist::Repository to perform incremental scans
#
# Returns a map of filename => [language, size]
def cache
@cache ||= begin
if @old_commit_oid == @commit_oid
@old_stats
else
compute_stats(@old_commit_oid, @commit_oid, @old_stats)
end
end
end
@enum.each do |blob|
# Skip files that are likely binary
next if blob.likely_binary?
protected
def compute_stats(old_commit_oid, commit_oid, cache = nil)
file_map = cache ? cache.dup : {}
old_tree = old_commit_oid && Rugged::Commit.lookup(repository, old_commit_oid).tree
new_tree = Rugged::Commit.lookup(repository, commit_oid).tree
# Skip vendored or generated blobs
next if blob.vendored? || blob.generated? || blob.language.nil?
diff = Rugged::Tree.diff(repository, old_tree, new_tree)
# Only include programming languages and acceptable markup languages
if blob.language.type == :programming || Language.detectable_markup.include?(blob.language.name)
diff.each_delta do |delta|
old = delta.old_file[:path]
new = delta.new_file[:path]
# Build up the per-file breakdown stats
@file_breakdown[blob.language.group.name] << blob.name
file_map.delete(old)
next if delta.binary
@sizes[blob.language.group] += blob.size
if [:added, :modified].include? delta.status
# Skip submodules
mode = delta.new_file[:mode]
next if (mode & 040000) != 0
blob = Linguist::LazyBlob.new(repository, delta.new_file[:oid], new, mode.to_s(8))
# Skip vendored or generated blobs
next if blob.vendored? || blob.generated? || blob.language.nil?
# Only include programming languages and acceptable markup languages
if blob.language.type == :programming || Language.detectable_markup.include?(blob.language.name)
file_map[new] = [blob.language.group.name, blob.size]
end
end
end
# Compute total size
@size = @sizes.inject(0) { |s,(_,v)| s + v }
# Get primary language
if primary = @sizes.max_by { |(_, size)| size }
@language = primary[0]
end
@computed_stats = true
nil
file_map
end
end
end
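Because the cache holds nothing but filenames, language names and byte counts, it can be serialized between runs and handed back via Repository.incremental. A small sketch (the JSON path is illustrative; repo is a Linguist::Repository as in the earlier sketch):

require 'json'

# repo = Linguist::Repository.new(rugged, commit_oid), as constructed above
File.write('/tmp/linguist-stats.json', JSON.dump(repo.cache))

old_stats = JSON.parse(File.read('/tmp/linguist-stats.json'))
# => { "app/models/user.rb" => ["Ruby", 2345], ... }

repo.breakdown_by_file  # => { "Ruby" => ["app/models/user.rb", ...], ... }
repo.size               # total bytes of detected code across all languages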

File diff suppressed because it is too large.


@@ -28,7 +28,7 @@ module Linguist
#
# Returns nothing.
def self.each(&block)
Dir.entries(ROOT).each do |category|
Dir.entries(ROOT).sort!.each do |category|
next if category == '.' || category == '..'
# Skip text and binary for now


@@ -43,6 +43,7 @@
# Vendored dependencies
- thirdparty/
- vendors?/
- extern(al)?/
# Debian packaging
- ^debian/
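With the new pattern, paths under extern/ or external/ are treated as vendored and excluded from language stats; for example (paths are illustrative):

require 'linguist'

Linguist::FileBlob.new('external/jquery/jquery.js').vendored?  # => true
Linguist::FileBlob.new('lib/app.js').vendored?                 # => false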


@@ -1,3 +1,3 @@
module Linguist
VERSION = "2.12.0"
VERSION = "3.0.0"
end