path: root/src/org/omegat/tokenizer/LuceneKoreanTokenizer.java
/**************************************************************************
 OmegaT - Computer Assisted Translation (CAT) tool 
          with fuzzy matching, translation memory, keyword search, 
          glossaries, and translation leveraging into updated projects.
 
 Copyright (C) 2014 Aaron Madlon-Kay
               Home page: http://www.omegat.org/
               Support center: http://groups.yahoo.com/group/OmegaT/

 This file is part of OmegaT.

 OmegaT is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.

 OmegaT is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with this program.  If not, see <http://www.gnu.org/licenses/>.
 **************************************************************************/
package org.omegat.tokenizer;

import java.io.StringReader;
import java.util.Collections;
import java.util.Set;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.kr.KoreanAnalyzer;
import org.apache.lucene.analysis.kr.KoreanTokenizer;

/**
 * This uses the Korean tokenizer currently under development for inclusion in
 * Lucene (but not yet incorporated).
 * <p>
 * The code quality appears to be poor at the moment (see the LUCENE-4956
 * thread; spurious ArrayIndexOutOfBoundsException errors are observable in
 * normal usage within OmegaT), so {@link Tokenizer#isDefault()} is currently
 * <code>false</code>.
 * 
 * @see <a href="http://sourceforge.net/projects/lucenekorean/">SourceForge
 *      project</a>
 * @see <a href="http://cafe.naver.com/korlucene">Korean site</a>
 * @see <a href="https://issues.apache.org/jira/browse/LUCENE-4956">Lucene
 *      issue</a>
 * 
 * @author Aaron Madlon-Kay
 */
@Tokenizer(languages = { "ko" })
public class LuceneKoreanTokenizer extends BaseTokenizer {

    @SuppressWarnings("resource")
    @Override
    protected TokenStream getTokenStream(final String strOrig,
            final boolean stemsAllowed, final boolean stopWordsAllowed) {
        if (stemsAllowed) {
            // Stemming requested: use the full analyzer. Hand it the Korean
            // stop-word set when stop words are enabled, or an empty set otherwise.
            Set<?> stopWords = stopWordsAllowed ? KoreanAnalyzer.STOP_WORDS_SET
                    : Collections.emptySet();
            return new KoreanAnalyzer(getBehavior(), stopWords)
                    .tokenStream("", new StringReader(strOrig));
        } else {
            // No stemming requested: plain tokenization is sufficient.
            return new KoreanTokenizer(getBehavior(), new StringReader(strOrig));
        }
    }
}
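
A minimal usage sketch (not part of the file above), showing how the TokenStream returned by getTokenStream() might be consumed with Lucene's standard CharTermAttribute API. The LuceneKoreanTokenizerDemo class, the sample sentence, and the assumption that a no-argument LuceneKoreanTokenizer can be constructed outside OmegaT's plugin loading are all hypothetical; the class is placed in the same org.omegat.tokenizer package only so the protected getTokenStream() is reachable.

package org.omegat.tokenizer;

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

/** Hypothetical demo class; not part of OmegaT. */
class LuceneKoreanTokenizerDemo {
    public static void main(String[] args) throws IOException {
        // Assumes the default no-arg constructor works standalone.
        LuceneKoreanTokenizer tokenizer = new LuceneKoreanTokenizer();
        // stemsAllowed = true, stopWordsAllowed = true
        TokenStream ts = tokenizer.getTokenStream("한국어 형태소 분석 예문입니다.", true, true);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            // Print each token (stemmed, with stop words handled by the analyzer).
            System.out.println(term.toString());
        }
        ts.end();
        ts.close();
    }
}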