1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
|
/**************************************************************************
OmegaT - Computer Assisted Translation (CAT) tool
with fuzzy matching, translation memory, keyword search,
glossaries, and translation leveraging into updated projects.
Copyright (C) 2013 Aaron Madlon-Kay
Home page: http://www.omegat.org/
Support center: http://groups.yahoo.com/group/OmegaT/
This file is part of OmegaT.
OmegaT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OmegaT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
**************************************************************************/
package org.omegat.tokenizer;
import java.io.IOException;
import java.io.StringReader;
import java.util.Collections;
import java.util.Set;
import net.moraleboost.io.BasicCodePointReader;
import net.moraleboost.io.CodePointReader;
import net.moraleboost.tinysegmenter.TinySegmenter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
/**
* A tokenizer based on <a href="http://chasen.org/~taku/software/TinySegmenter/">TinySegmenter by Taku Kudo</a>.
*
* This implementation uses the <a href="http://code.google.com/p/cmecab-java/source/browse/trunk/src/net/moraleboost/tinysegmenter/">
* Java port by Kohei Taketa</a>.
*
* For stopword filtering we borrow the facilities provided
* by {@link CJKAnalyzer} and {@link StopFilter}.
*
* @author Aaron Madlon-Kay
*
*/
@Tokenizer(languages = { "ja" })
public class TinySegmenterJapaneseTokenizer extends BaseTokenizer {

    public TinySegmenterJapaneseTokenizer() {
        super();
        // Exact tokenization must also go through TinySegmenter rather than
        // the delegate, so delegation is disabled.
        shouldDelegateTokenizeExactly = false;
    }

    /**
     * Produce a {@link TokenStream} over the given string using TinySegmenter.
     *
     * @param strOrig the text to tokenize
     * @param stemsAllowed when true, the stream is wrapped in a {@link StopFilter}
     * @param stopWordsAllowed when true (and stemsAllowed is true), the default
     *        CJK stop word set is applied; otherwise an empty set is used so the
     *        stream shape is the same in both cases
     * @return the (possibly stop-filtered) token stream
     */
    @Override
    protected TokenStream getTokenStream(String strOrig, boolean stemsAllowed,
            boolean stopWordsAllowed) {
        TokenStream ts = new TokenStreamWrapper(new BasicCodePointReader(new StringReader(strOrig)));
        if (stemsAllowed) {
            Set<?> stopWords = stopWordsAllowed ? CJKAnalyzer.getDefaultStopSet()
                    : Collections.emptySet();
            return new StopFilter(getBehavior(), ts, stopWords);
        }
        return ts;
    }

    /**
     * Wrap a {@link TinySegmenter} to behave like a {@link TokenStream}.
     */
    public static class TokenStreamWrapper extends TokenStream {
        // The underlying segmenter producing tokens one at a time via next().
        private final TinySegmenter segmenter;
        // Attribute carrying the token's text.
        private final CharTermAttribute termAttr;
        // Attribute carrying the token's start/end offsets in the input.
        private final OffsetAttribute offAttr;

        public TokenStreamWrapper(CodePointReader reader) {
            segmenter = new TinySegmenter(reader);
            termAttr = addAttribute(CharTermAttribute.class);
            offAttr = addAttribute(OffsetAttribute.class);
        }

        /**
         * Advance to the next token from the segmenter, populating the term
         * and offset attributes.
         *
         * @return false when the segmenter is exhausted, true otherwise
         * @throws IOException if the underlying reader fails
         */
        @Override
        public boolean incrementToken() throws IOException {
            TinySegmenter.Token token = segmenter.next();
            if (token == null) {
                return false;
            }
            // The TokenStream contract requires resetting ALL attributes
            // before populating them for a new token; setEmpty() on the term
            // attribute alone is not sufficient if consumers add attributes.
            clearAttributes();
            termAttr.append(token.str);
            // NOTE(review): TinySegmenter reports offsets as longs; the
            // narrowing casts assume inputs short enough not to overflow int.
            offAttr.setOffset((int) token.start, (int) token.end);
            return true;
        }
    }
}
|