Feature #38446 » 38446.patch
lib/redmine/search.rb

       def tokens
         # extract tokens from the question
         # eg. hello "bye bye" => ["hello", "bye bye"]
-        tokens = @question.scan(%r{(([[:space:]]|^)"[^"]+"([[:space:]]|$)|[[:^space:]]+)}).collect {|m| m.first.gsub(%r{(^[[:space:]]*"[[:space:]]*|[[:space:]]*"[[:space:]]*$)}, '')}
+        tokens = @question.scan(/"[^"]+"|[[:^space:]]+/).map do |token|
+          # Remove quotes from quoted tokens, strip surrounding whitespace
+          token.gsub(/"\s*|\s*"/, '').gsub(/^[[:space:]]+|[[:space:]]+$/, '')
+        end
         # tokens must be at least 2 characters long
         # but for Chinese characters (Chinese HANZI/Japanese KANJI), tokens can be one character
         # no more than 5 tokens to search for
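
For reference, a minimal standalone sketch of the tokenization behaviour introduced above; the question string is an illustrative input, not taken from the patch:

# Illustrative input; the scan and gsub calls mirror the patched tokens method.
question = 'hello "bye bye"  "phrase one" world'

tokens = question.scan(/"[^"]+"|[[:^space:]]+/).map do |token|
  # Drop the surrounding quotes from quoted tokens, then strip leading/trailing whitespace
  token.gsub(/"\s*|\s*"/, '').gsub(/^[[:space:]]+|[[:space:]]+$/, '')
end

p tokens  # => ["hello", "bye bye", "phrase one", "world"]

Quoted phrases stay together as single tokens, which is what the new test_tokenize_should_support_multiple_phrases test below asserts for two adjacent phrases.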
test/unit/lib/redmine/search_test.rb

     value = "全角\u3000スペース"
     assert_equal %w[全角 スペース], Redmine::Search::Tokenizer.new(value).tokens
   end
+
+  def test_tokenize_should_support_multiple_phrases
+    value = '"phrase one" "phrase two"'
+    assert_equal ["phrase one", "phrase two"], Redmine::Search::Tokenizer.new(value).tokens
+  end
 end