Mirror of https://github.com/KevinMidboe/linguist.git, synced 2025-10-29 17:50:22 +00:00
Make classify a function on the Classifier
@@ -442,7 +442,7 @@ module Linguist
       if Language.ambiguous?(extname)
         possible_languages = Language.all.select { |l| l.extensions.include?(extname) }.map(&:name)
         if possible_languages.any?
-          if result = Classifier.new(Samples::DATA).classify(data, possible_languages).first
+          if result = Classifier.classify(Samples::DATA, data, possible_languages).first
             Language[result[0]]
           end
         end
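At this call site the caller no longer builds a throwaway Classifier instance around `Samples::DATA`; it hands the database Hash straight to the class method. A minimal sketch of how that entry point is used from outside, assuming linguist's lib directory is on the load path and using made-up file contents and extension candidates:

    require 'linguist/classifier'
    require 'linguist/samples'

    data = File.read('unknown.h')                       # ambiguous extension: .h
    possible_languages = ['C', 'C++', 'Objective-C']    # candidates for that extension

    # Returns [[language_name, score], ...] sorted best-first.
    results = Linguist::Classifier.classify(Linguist::Samples::DATA, data, possible_languages)
    best_language = results.first && results.first[0]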
@@ -3,56 +3,76 @@ require 'linguist/tokenizer'
 module Linguist
   # Language bayesian classifier.
   class Classifier
-    # Public: Initialize a Classifier.
-    def initialize(attrs = {})
-      @tokens_total    = attrs['tokens_total'] || 0
-      @languages_total = attrs['languages_total'] || 0
-      @tokens          = attrs['tokens'] || {}
-      @language_tokens = attrs['language_tokens'] || {}
-      @languages       = attrs['languages'] || {}
-    end
-
     # Public: Train classifier that data is a certain language.
     #
+    # db       - Hash classifier database object
     # language - String language of data
     # data     - String contents of file
     #
     # Examples
     #
-    #   train('Ruby', "def hello; end")
+    #   Classifier.train(db, 'Ruby', "def hello; end")
     #
     # Returns nothing.
-    def train(language, data)
+    def self.train!(db, language, data)
       tokens = Tokenizer.tokenize(data)
 
+      db['tokens_total'] ||= 0
+      db['languages_total'] ||= 0
+      db['tokens'] ||= {}
+      db['language_tokens'] ||= {}
+      db['languages'] ||= {}
+
       tokens.each do |token|
-        @tokens[language] ||= {}
-        @tokens[language][token] ||= 0
-        @tokens[language][token] += 1
-        @language_tokens[language] ||= 0
-        @language_tokens[language] += 1
-        @tokens_total += 1
+        db['tokens'][language] ||= {}
+        db['tokens'][language][token] ||= 0
+        db['tokens'][language][token] += 1
+        db['language_tokens'][language] ||= 0
+        db['language_tokens'][language] += 1
+        db['tokens_total'] += 1
       end
-      @languages[language] ||= 0
-      @languages[language] += 1
-      @languages_total += 1
+      db['languages'][language] ||= 0
+      db['languages'][language] += 1
+      db['languages_total'] += 1
 
       nil
     end
 
     # Public: Guess language of data.
     #
+    # db        - Hash of classifer tokens database.
     # data      - Array of tokens or String data to analyze.
     # languages - Array of language name Strings to restrict to.
     #
     # Examples
     #
-    #   classify("def hello; end")
+    #   Classifier.classify(db, "def hello; end")
     #   # => [ 'Ruby', 0.90], ['Python', 0.2], ... ]
     #
     # Returns sorted Array of result pairs. Each pair contains the
     # String language name and a Float score.
-    def classify(tokens, languages = @languages.keys)
+    def self.classify(db, tokens, languages = nil)
+      languages ||= db['languages'].keys
+      new(db).classify(tokens, languages)
+    end
+
+    # Internal: Initialize a Classifier.
+    def initialize(db = {})
+      @tokens_total    = db['tokens_total']
+      @languages_total = db['languages_total']
+      @tokens          = db['tokens']
+      @language_tokens = db['language_tokens']
+      @languages       = db['languages']
+    end
+
+    # Internal: Guess language of data
+    #
+    # data      - Array of tokens or String data to analyze.
+    # languages - Array of language name Strings to restrict to.
+    #
+    # Returns sorted Array of result pairs. Each pair contains the
+    # String language name and a Float score.
+    def classify(tokens, languages)
       return [] if tokens.nil?
       tokens = Tokenizer.tokenize(tokens) if tokens.is_a?(String)
 
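The instance is now an internal detail: `train!` mutates a plain Hash supplied by the caller, and `self.classify` wraps that Hash in a short-lived instance to do the scoring. A minimal round-trip sketch of the new class-level API, with made-up training snippets:

    require 'linguist/classifier'

    db = {}
    Linguist::Classifier.train!(db, 'Ruby',   "def hello\n  puts 'hi'\nend\n")
    Linguist::Classifier.train!(db, 'Python', "def hello():\n    print('hi')\n")

    # Restrict the guess to specific languages, or omit the third argument to
    # consider everything recorded in db['languages'].
    results = Linguist::Classifier.classify(db, "def hello; end", ['Ruby', 'Python'])
    results.first  # => ['Ruby', <Float score>] for a snippet like this one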
@@ -99,18 +119,5 @@ module Linguist
     def language_probability(language)
       Math.log(@languages[language].to_f / @languages_total.to_f)
     end
-
-    # Public: Returns serializable hash representation.
-    #
-    # Returns Hash.
-    def to_hash
-      {
-        'tokens_total' => @tokens_total,
-        'languages_total' => @languages_total,
-        'tokens' => @tokens,
-        'language_tokens' => @language_tokens,
-        'languages' => @languages
-      }
-    end
   end
 end
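With `to_hash` removed there is no classifier-owned snapshot left to export; the Hash the caller passes in is already the serializable representation, so its counts can be read directly (key names as set up by `train!` above):

    db = {}
    Linguist::Classifier.train!(db, 'Ruby', "def hello; end")

    db['tokens_total']      # total number of tokens seen across all training calls
    db['languages'].keys    # names of the trained languages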
@@ -22,7 +22,7 @@ module Linguist
     #
     # Returns Boolean.
     def self.outdated?
-      MD5.hexdigest(DATA) != MD5.hexdigest(classifier.to_hash)
+      MD5.hexdigest(DATA) != MD5.hexdigest(data)
     end
 
     # Public: Iterate over each sample.
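`outdated?` now digests the freshly built `data` Hash rather than a trained Classifier's `to_hash`; the `MD5` helper used here (presumably Linguist's own wrapper) accepts a Hash as well as a String, as both the old and new lines rely on. A small usage sketch, with an assumed require path:

    require 'linguist/samples'

    # True when the samples on disk no longer match the serialized Samples::DATA,
    # i.e. the checked-in database needs to be regenerated.
    Linguist::Samples.outdated?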
@@ -98,52 +98,50 @@ module Linguist
     # Public: Build Classifier from all samples.
     #
     # Returns trained Classifier.
-    def self.classifier
+    def self.data
       require 'linguist/classifier'
       require 'linguist/language'
 
-      classifier = Classifier.new
-      each { |sample|
+      db = {}
+      each do |sample|
         language = Language.find_by_alias(sample[:language])
         data     = File.read(sample[:path])
-        classifier.train(language.name, data)
-      }
-      classifier
+        Classifier.train!(db, language.name, data)
+      end
+      db
     end
 
     # Public: Serialize samples data to YAML.
     #
-    # data - Hash
-    # io   - IO object to write to
+    # db - Hash
     #
-    # Returns nothing.
-    def self.serialize_to_yaml(data, io)
-      data = ""
+    # Returns String.
+    def self.serialize_to_yaml(db)
+      out = ""
       escape = lambda { |s| s.inspect.gsub(/\\#/, "\#") }
 
-      data << "languages_total: #{data['languages_total']}\n"
-      data << "tokens_total: #{data['tokens_total']}\n"
+      out << "languages_total: #{db['languages_total']}\n"
+      out << "tokens_total: #{db['tokens_total']}\n"
 
-      data << "languages:\n"
-      data['languages'].sort.each do |language, count|
-        data << "  #{escape.call(language)}: #{count}\n"
+      out << "languages:\n"
+      db['languages'].sort.each do |language, count|
+        out << "  #{escape.call(language)}: #{count}\n"
       end
 
-      data << "language_tokens:\n"
-      data['language_tokens'].sort.each do |language, count|
-        data << "  #{escape.call(language)}: #{count}\n"
+      out << "language_tokens:\n"
+      db['language_tokens'].sort.each do |language, count|
+        out << "  #{escape.call(language)}: #{count}\n"
       end
 
-      data << "tokens:\n"
-      data['tokens'].sort.each do |language, tokens|
-        data << "  #{escape.call(language)}:\n"
+      out << "tokens:\n"
+      db['tokens'].sort.each do |language, tokens|
+        out << "  #{escape.call(language)}:\n"
         tokens.sort.each do |token, count|
-          data << "    #{escape.call(token)}: #{count}\n"
+          out << "    #{escape.call(token)}: #{count}\n"
         end
       end
 
-      io.write data
-      nil
+      out
     end
   end
 end
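Putting the samples side together: `Samples.data` now returns the plain db Hash and `serialize_to_yaml` returns the YAML String instead of writing to an IO, so a regeneration script becomes a sketch like the following (the output path and the script itself are assumptions, not part of this commit):

    require 'linguist/samples'

    db   = Linguist::Samples.data                   # re-train a fresh db Hash from the sample files
    yaml = Linguist::Samples.serialize_to_yaml(db)  # serialize it to a YAML String

    # Hypothetical location for the checked-in database.
    File.open('lib/linguist/samples.yml', 'w') { |f| f.write(yaml) }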