# frozen_string_literal: true

class LanguageDetector
  include Singleton

  WORDS_THRESHOLD = 4
  RELIABLE_CHARACTERS_RE = /[\p{Hebrew}\p{Arabic}\p{Syriac}\p{Thaana}\p{Nko}\p{Han}\p{Katakana}\p{Hiragana}\p{Hangul}]+/m

  def initialize
    # Compact Language Detector v3 (CLD3) replaces CLD2, which was used by the
    # previous implementation (#2949). Unlike CLD2's Naïve Bayesian classifier,
    # CLD3 is implemented with a neural network model. CLD3 also supports newer
    # compilers such as GCC 6, which CLD2 cannot be built with because it
    # assigns negative values to variables typed unsigned. On the other hand,
    # CLD3 requires C++11 support and protocol buffers, and is accessed through
    # the cld3-ruby FFI gem, which is still new but passes the existing tests.
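    # The arguments below are assumed to be the minimum and maximum number of
    # bytes of input the identifier will consider for a single prediction.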
    @identifier = CLD3::NNetLanguageIdentifier.new(1, 2048)
  end

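  # Guesses the language of a piece of text. A typical call (via the Singleton
  # interface) might look like:
  #
  #   LanguageDetector.instance.detect(some_text, some_account)
  #
  # Returns an ISO 639-1 code as a symbol when detection is reliable, falls
  # back to the account's own locale for local accounts, or returns nil.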
  def detect(text, account)
    input_text = prepare_text(text)

    return if input_text.blank?

    detect_language_code(input_text) || default_locale(account)
  end

  def language_names
    @language_names ||= CLD3::TaskContextParams::LANGUAGE_NAMES.map { |name| iso6391(name.to_s).to_sym }.uniq
  end

  private

  def prepare_text(text)
    simplify_text(text).strip
  end

  def unreliable_input?(text)
    !reliable_input?(text)
  end

  def reliable_input?(text)
    sufficient_text_length?(text) || language_specific_character_set?(text)
  end

  def sufficient_text_length?(text)
    text.split(/\s+/).size >= WORDS_THRESHOLD
  end

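  # Short texts are usually too small a sample for CLD3, but scripts matched by
  # RELIABLE_CHARACTERS_RE (Hebrew, Arabic, CJK, Hangul, ...) identify the
  # language well on their own, so the input counts as reliable when such
  # characters make up more than 30% of it.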
  def language_specific_character_set?(text)
    words = text.scan(RELIABLE_CHARACTERS_RE)

    if words.present?
      words.reduce(0) { |acc, elem| acc + elem.size }.to_f / text.size > 0.3
    else
      false
    end
  end

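  # Runs CLD3 on the prepared text, returning an ISO 639-1 symbol only when the
  # input looks reliable and CLD3 itself reports the prediction as reliable.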
  def detect_language_code(text)
    return if unreliable_input?(text)

    result = @identifier.find_language(text)

    iso6391(result.language.to_s).to_sym if result&.reliable?
  end

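  # Reduces a BCP 47 language tag returned by CLD3 to a two-letter ISO 639-1
  # code, dropping any subtags and normalising the grandfathered "iw" tag.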
  def iso6391(bcp47)
    iso639 = bcp47.split('-').first

    # CLD3 returns grandfathered language code for Hebrew
    return 'he' if iso639 == 'iw'

    ISO_639.find(iso639).alpha2
  end

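  # Strips markup, URLs, mentions, hashtags (splitting camelCase hashtags into
  # separate words) and custom emoji shortcodes, then collapses whitespace, so
  # that detection only sees natural-language text.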
  def simplify_text(text)
    new_text = remove_html(text)

    new_text.gsub!(FetchLinkCardService::URL_PATTERN, '')
    new_text.gsub!(Account::MENTION_RE, '')
    new_text.gsub!(Tag::HASHTAG_RE) { |string| string.gsub(/[#_]/, '#' => '', '_' => ' ').gsub(/[a-z][A-Z]|[a-zA-Z][\d]/) { |s| s.insert(1, ' ') }.downcase }
    new_text.gsub!(/:#{CustomEmoji::SHORTCODE_RE_FRAGMENT}:/, '')
    new_text.gsub!(/\s+/, ' ')

    new_text
  end

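  # The scrubber keeps only <br> and <p> tags so that remove_html can translate
  # the remaining structure into plain-text line breaks.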
  def new_scrubber
    scrubber = Rails::Html::PermitScrubber.new
    scrubber.tags = %w(br p)
    scrubber
  end

  def scrubber
    @scrubber ||= new_scrubber
  end

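  # Scrubs the HTML down to <br> and <p> tags and converts those into newlines
  # and blank lines, leaving plain text.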
  def remove_html(text)
    text = Loofah.fragment(text).scrub!(scrubber).to_s
    text.gsub!('<br>', "\n")
    text.gsub!('</p><p>', "\n\n")
    text.gsub!(/(^<p>|<\/p>$)/, '')
    text
  end

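  # Falls back to the author's chosen locale (or the instance-wide default),
  # but only for local accounts; remote accounts yield nil.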
  def default_locale(account)
    account.user_locale&.to_sym || I18n.default_locale if account.local?
  end
end