Lindera
The lindera tokenizer performs dictionary-based morphological analysis. It is designed for Japanese and Korean—languages where words are not separated by spaces and grammatical markers (particles) attach directly to words.
For Chinese text: While lindera supports Chinese via the cc-cedict dictionary, we recommend using the jieba tokenizer instead. Jieba is specifically designed for Chinese word segmentation and provides better results.
Overview
Japanese and Korean are agglutinative languages: grammatical markers called particles attach directly to nouns, forming numerous combinations. For example:
| Language | Root word | Particle | Combined form | Meaning |
|---|---|---|---|---|
| Korean | 서울 (Seoul) | 에서 | 서울에서 | in Seoul |
| Japanese | 東京 (Tokyo) | に | 東京に | to Tokyo |
The lindera tokenizer:
- Segments text into individual morphemes (words and particles)
- Tags each token with part-of-speech (POS) information from the dictionary
- Applies filters to remove unwanted tokens (e.g., particles, punctuation)
This two-stage process—segmentation followed by POS-based filtering—enables precise control over which tokens are indexed for search.
Configuration
To configure an analyzer using the lindera tokenizer, set tokenizer.type to lindera, choose a dictionary with dict_kind, and optionally apply filters.
- Python
- Java
- Go
- NodeJS
- cURL
# Analyzer config: lindera tokenizer with the Korean dictionary ("ko-dic").
# The korean_stop_tags filter drops tokens whose POS tag appears in `tags`
# (punctuation and particle markers — see the table above for what is removed).
analyzer_params = {
    "tokenizer": {
        "type": "lindera",
        "dict_kind": "ko-dic",
        "filter": [
            {
                "kind": "korean_stop_tags",
                "tags": ["SP", "SSC", "SSO", "SC", "SE", "SF", "JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ", "JX", "JC", "UNK", "EP", "ETM"]
            }
        ]
    }
}
// Analyzer config: lindera tokenizer with the Korean dictionary ("ko-dic").
// The korean_stop_tags filter drops tokens whose POS tag appears in the list
// (punctuation and particle markers).
Map<String, Object> analyzerParams = new HashMap<>();
analyzerParams.put("tokenizer", new HashMap<String, Object>() {{
    put("type", "lindera");
    put("dict_kind", "ko-dic");
    put("filter", Arrays.asList(
        new HashMap<String, Object>() {{
            put("kind", "korean_stop_tags");
            put("tags", Arrays.asList(
                "SP", "SSC", "SSO", "SC", "SE", "SF",
                "JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ",
                "JX", "JC", "UNK", "EP", "ETM"
            ));
        }}
    ));
}});
// Analyzer config: lindera tokenizer with the Korean dictionary ("ko-dic").
// The korean_stop_tags filter drops tokens whose POS tag appears in "tags"
// (punctuation and particle markers).
analyzerParams := map[string]interface{}{
	"tokenizer": map[string]interface{}{
		"type":      "lindera",
		"dict_kind": "ko-dic",
		"filter": []interface{}{
			map[string]interface{}{
				"kind": "korean_stop_tags",
				"tags": []string{
					"SP", "SSC", "SSO", "SC", "SE", "SF",
					"JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ",
					"JX", "JC", "UNK", "EP", "ETM",
				},
			},
		},
	},
}
// Analyzer config: lindera tokenizer with the Korean dictionary ("ko-dic").
// The korean_stop_tags filter drops tokens whose POS tag appears in "tags"
// (punctuation and particle markers).
const analyzer_params = {
  "tokenizer": {
    "type": "lindera",
    "dict_kind": "ko-dic",
    "filter": [
      {
        "kind": "korean_stop_tags",
        "tags": ["SP", "SSC", "SSO", "SC", "SE", "SF", "JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ", "JX", "JC", "UNK", "EP", "ETM"]
      }
    ]
  }
};
# restful
| Parameter | Description |
|---|---|
| `tokenizer.type` | The type of tokenizer. This is fixed to `lindera`. |
| `dict_kind` | A dictionary used to define vocabulary. Possible values include `ipadic` (Japanese), `ko-dic` (Korean), and `cc-cedict` (Chinese). |
| `filter` | A list of tokenizer-level filters to apply after segmentation. Each filter is an object with a `kind` field (e.g., `korean_stop_tags`, `japanese_stop_tags`) and kind-specific parameters such as `tags`. |
After defining analyzer_params, you can apply them to a VARCHAR field when defining a collection schema. This allows Zilliz Cloud to process the text in that field using the specified analyzer for efficient tokenization and filtering. For details, refer to Example use.
Examples
Before applying the analyzer configuration to your collection schema, verify its behavior using the run_analyzer method.
Korean example
- Python
- Java
- Go
- NodeJS
- cURL
from pymilvus import MilvusClient

# Connect to the cluster; replace with your own endpoint.
client = MilvusClient(uri="YOUR_CLUSTER_ENDPOINT")

# lindera tokenizer with the Korean dictionary ("ko-dic"); korean_stop_tags
# removes particle/punctuation tokens after segmentation.
analyzer_params = {
    "tokenizer": {
        "type": "lindera",
        "dict_kind": "ko-dic",
        "filter": [
            {
                "kind": "korean_stop_tags",
                "tags": ["SP", "SSC", "SSO", "SC", "SE", "SF", "JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ", "JX", "JC", "UNK", "EP", "ETM"]
            }
        ]
    }
}

# Sample Korean text: "서울에서 맛있는 음식을 먹었습니다" (I ate delicious food in Seoul)
sample_text = "서울에서 맛있는 음식을 먹었습니다"

# run_analyzer applies the analyzer to sample text without creating a
# collection, so the tokenization can be verified before use in a schema.
result = client.run_analyzer(sample_text, analyzer_params)
print("Analyzer output:", result)
import io.milvus.v2.client.ConnectConfig;
import io.milvus.v2.client.MilvusClientV2;
import io.milvus.v2.service.vector.request.RunAnalyzerReq;
import io.milvus.v2.service.vector.response.RunAnalyzerResp;

// Connect to the cluster; replace with your own endpoint.
ConnectConfig config = ConnectConfig.builder()
        .uri("YOUR_CLUSTER_ENDPOINT")
        .build();
MilvusClientV2 client = new MilvusClientV2(config);

// lindera tokenizer with the Korean dictionary ("ko-dic"); korean_stop_tags
// removes particle/punctuation tokens after segmentation.
Map<String, Object> analyzerParams = new HashMap<>();
analyzerParams.put("tokenizer", new HashMap<String, Object>() {{
    put("type", "lindera");
    put("dict_kind", "ko-dic");
    put("filter", Arrays.asList(
        new HashMap<String, Object>() {{
            put("kind", "korean_stop_tags");
            put("tags", Arrays.asList(
                "SP", "SSC", "SSO", "SC", "SE", "SF",
                "JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ",
                "JX", "JC", "UNK", "EP", "ETM"
            ));
        }}
    ));
}});

// Sample Korean text: "서울에서 맛있는 음식을 먹었습니다" (I ate delicious food in Seoul)
List<String> texts = new ArrayList<>();
texts.add("서울에서 맛있는 음식을 먹었습니다");

// runAnalyzer applies the analyzer to sample text without creating a
// collection, so the tokenization can be verified before use in a schema.
RunAnalyzerResp resp = client.runAnalyzer(RunAnalyzerReq.builder()
        .texts(texts)
        .analyzerParams(analyzerParams)
        .build());
List<RunAnalyzerResp.AnalyzerResult> results = resp.getResults();
import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/milvus-io/milvus/client/v2/milvusclient"
)

// Connect to the cluster; replace the endpoint and API key with your own.
// NOTE(review): ctx is assumed to be a context.Context created by the caller —
// it is not defined in this snippet.
client, err := milvusclient.New(ctx, &milvusclient.ClientConfig{
	Address: "YOUR_CLUSTER_ENDPOINT",
	APIKey:  "YOUR_CLUSTER_TOKEN",
})
if err != nil {
	fmt.Println(err.Error())
	// handle error
}

// lindera tokenizer with the Korean dictionary ("ko-dic"); korean_stop_tags
// removes particle/punctuation tokens after segmentation.
analyzerParams := map[string]interface{}{
	"tokenizer": map[string]interface{}{
		"type":      "lindera",
		"dict_kind": "ko-dic",
		"filter": []interface{}{
			map[string]interface{}{
				"kind": "korean_stop_tags",
				"tags": []string{
					"SP", "SSC", "SSO", "SC", "SE", "SF",
					"JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ",
					"JX", "JC", "UNK", "EP", "ETM",
				},
			},
		},
	},
}

// The Go client takes the analyzer params as a JSON string.
bs, _ := json.Marshal(analyzerParams)

// Sample Korean text: "서울에서 맛있는 음식을 먹었습니다" (I ate delicious food in Seoul)
texts := []string{"서울에서 맛있는 음식을 먹었습니다"}

// RunAnalyzer applies the analyzer to sample text without creating a
// collection, so the tokenization can be verified before use in a schema.
option := milvusclient.NewRunAnalyzerOption(texts).
	WithAnalyzerParams(string(bs))
result, err := client.RunAnalyzer(ctx, option)
if err != nil {
	fmt.Println(err.Error())
	// handle error
}
import { MilvusClient } from "@zilliz/milvus2-sdk-node";

// Connect to the cluster; replace with your own endpoint.
const client = new MilvusClient({
  uri: "YOUR_CLUSTER_ENDPOINT",
});

// lindera tokenizer with the Korean dictionary ("ko-dic"); korean_stop_tags
// removes particle/punctuation tokens after segmentation.
const analyzer_params = {
  tokenizer: {
    type: "lindera",
    dict_kind: "ko-dic",
    filter: [
      {
        kind: "korean_stop_tags",
        tags: [
          "SP",
          "SSC",
          "SSO",
          "SC",
          "SE",
          "SF",
          "JKS",
          "JKC",
          "JKG",
          "JKO",
          "JKB",
          "JKV",
          "JKQ",
          "JX",
          "JC",
          "UNK",
          "EP",
          "ETM",
        ],
      },
    ],
  },
};

// Sample Korean text: "서울에서 맛있는 음식을 먹었습니다" (I ate delicious food in Seoul)
const sample_text = "서울에서 맛있는 음식을 먹었습니다";

// run_analyzer applies the analyzer to sample text without creating a
// collection, so the tokenization can be verified before use in a schema.
const result = await client.run_analyzer(sample_text, analyzer_params);
console.log("Analyzer output:", result);
# restful
Expected output:
['서울', '맛있', '음식', '먹', '습니다']
Without korean_stop_tags, the output would include particles like 에서 (in), 는 (topic marker), and 을 (object marker), which are typically not useful for search.
Japanese example
- Python
- Java
- Go
- NodeJS
- cURL
from pymilvus import MilvusClient

# Connect to the cluster; replace with your own endpoint.
client = MilvusClient(uri="YOUR_CLUSTER_ENDPOINT")

# lindera tokenizer with the Japanese IPADIC dictionary; japanese_stop_tags
# removes tokens whose POS tag is listed (particles, auxiliaries, symbols).
analyzer_params = {
    "tokenizer": {
        "type": "lindera",
        "dict_kind": "ipadic",
        "filter": [
            {
                "kind": "japanese_stop_tags",
                "tags": ["接続詞", "助詞,格助詞", "助詞,格助詞,一般", "助詞,格助詞,引用", "助詞,格助詞,連語", "助詞,係助詞", "助詞,終助詞", "助詞,接続助詞", "助詞,特殊", "助詞,副助詞", "助詞,副助詞/並立助詞/終助詞", "助詞,連体化", "助詞,副詞化", "助詞,並立助詞", "助動詞", "記号,一般", "記号,読点", "記号,句点", "記号,空白", "記号,括弧閉", "記号,括弧開", "その他,間投", "フィラー", "非言語音"]
            }
        ]
    }
}

# Sample Japanese text: "東京スカイツリーの最寄り駅はとうきょうスカイツリー駅です"
sample_text = "東京スカイツリーの最寄り駅はとうきょうスカイツリー駅です"

# run_analyzer applies the analyzer to sample text without creating a
# collection, so the tokenization can be verified before use in a schema.
result = client.run_analyzer(sample_text, analyzer_params)
print("Analyzer output:", result)
// java
// go
import { MilvusClient } from "@zilliz/milvus2-sdk-node";

// Connect to the cluster; replace with your own endpoint.
const client = new MilvusClient({
  uri: "YOUR_CLUSTER_ENDPOINT",
});

// lindera tokenizer with the Japanese IPADIC dictionary; japanese_stop_tags
// removes tokens whose POS tag is listed (particles, auxiliaries, symbols).
const analyzer_params = {
  "tokenizer": {
    "type": "lindera",
    "dict_kind": "ipadic",
    "filter": [
      {
        "kind": "japanese_stop_tags",
        "tags": ["接続詞", "助詞,格助詞", "助詞,格助詞,一般", "助詞,格助詞,引用", "助詞,格助詞,連語", "助詞,係助詞", "助詞,終助詞", "助詞,接続助詞", "助詞,特殊", "助詞,副助詞", "助詞,副助詞/並立助詞/終助詞", "助詞,連体化", "助詞,副詞化", "助詞,並立助詞", "助動詞", "記号,一般", "記号,読点", "記号,句点", "記号,空白", "記号,括弧閉", "記号,括弧開", "その他,間投", "フィラー", "非言語音"]
      }
    ]
  }
}

// Sample Japanese text: "東京スカイツリーの最寄り駅はとうきょうスカイツリー駅です"
const sample_text = "東京スカイツリーの最寄り駅はとうきょうスカイツリー駅です"

// run_analyzer applies the analyzer to sample text without creating a
// collection, so the tokenization can be verified before use in a schema.
const result = await client.run_analyzer(sample_text, analyzer_params);
console.log("Analyzer output:", result);
# restful
Expected output:
['東京', 'スカイ', 'ツリー', '最寄り駅', 'とう', 'きょう', 'スカイ', 'ツリー', '駅']
Without japanese_stop_tags, the output would include particles like の (possessive), は (topic marker), and です (copula).