Lucene 2.0 Tutorial
Each of the sample .txt files under c:\s used in the example below contains the line: 中华人民共和国全国人民2006年
package lighter.javaeye.com;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Date;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;

// NOTE: the original listing begins inside main(); the class header, imports
// and the fileDir declaration are filled in here so the example compiles.
public class TextFileIndexer {

    public static void main(String[] args) throws Exception {
        // Directory that holds the .txt files to be indexed
        File fileDir = new File("c:\\s");

        // Location where the index files are stored
        File indexDir = new File("c:\\index");
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer,
                true);
        File[] textFiles = fileDir.listFiles();
        long startTime = new Date().getTime();

        // Add a Document to the index for every .txt file
        for (int i = 0; i < textFiles.length; i++) {
            if (textFiles[i].isFile()
                    && textFiles[i].getName().endsWith(".txt")) {
                System.out.println("File " + textFiles[i].getCanonicalPath()
                        + " is being indexed.");
                String temp = FileReaderAll(textFiles[i].getCanonicalPath(),
                        "GBK");
                System.out.println(temp);
                Document document = new Document();
                Field FieldPath = new Field("path", textFiles[i].getPath(),
                        Field.Store.YES, Field.Index.NO);
                Field FieldBody = new Field("body", temp, Field.Store.YES,
                        Field.Index.TOKENIZED,
                        Field.TermVector.WITH_POSITIONS_OFFSETS);
                document.add(FieldPath);
                document.add(FieldBody);
                indexWriter.addDocument(document);
            }
        }
        // optimize() merges the index segments
        indexWriter.optimize();
        indexWriter.close();

        // Report how long indexing took
        long endTime = new Date().getTime();
        System.out.println("It took " + (endTime - startTime)
                + " milliseconds to add the documents to the index: "
                + fileDir.getPath());
    }

    // Read a whole file into a String using the given character set
    public static String FileReaderAll(String FileName, String charset)
            throws IOException {
        BufferedReader reader = new BufferedReader(new InputStreamReader(
                new FileInputStream(FileName), charset));
        String line = new String();
        String temp = new String();

        while ((line = reader.readLine()) != null) {
            temp += line;
        }
        reader.close();
        return temp;
    }
}
The output:

File C:\s\1.txt is being indexed.
中华人民共和国全国人民2006年
File C:\s\2.txt is being indexed.
中华人民共和国全国人民2006年
File C:\s\3.txt is being indexed.
中华人民共和国全国人民2006年
It took 297 milliseconds to add the documents to the index: c:\s
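A small aside on the FileReaderAll helper in the listing above: BufferedReader.readLine() strips the line terminators, so concatenating the lines with += silently drops the line breaks, and repeated String concatenation gets slow for large files. A drop-in variant (same signature and imports, just a sketch) using a StringBuffer:

[code]
public static String FileReaderAll(String FileName, String charset)
        throws IOException {
    BufferedReader reader = new BufferedReader(new InputStreamReader(
            new FileInputStream(FileName), charset));
    StringBuffer temp = new StringBuffer();
    String line;
    while ((line = reader.readLine()) != null) {
        // readLine() drops the terminator, so put a newline back
        temp.append(line).append('\n');
    }
    reader.close();
    return temp.toString();
}
[/code]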
The matching search code is again a fragment; the method header and the declarations of hits and queryString are filled in around the original body:

public static void search(String queryString) throws IOException {
    Hits hits = null;
    Query query = null;
    IndexSearcher searcher = new IndexSearcher("c:\\index");

    Analyzer analyzer = new StandardAnalyzer();
    try {
        QueryParser qp = new QueryParser("body", analyzer);
        query = qp.parse(queryString);
    } catch (ParseException e) {
        e.printStackTrace();
    }
    if (searcher != null) {
        hits = searcher.search(query);
        if (hits.length() > 0) {
            System.out.println("Found: " + hits.length() + " result(s)!");
        }
    }
}

Searching the index built above prints:

Found: 3 result(s)!
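The method above only reports the number of matches. Since the indexing code stored each file's path (Field.Store.YES on the "path" field), the matching files can be listed as well; a minimal extension of the if-block above (a sketch, not part of the original listing):

[code]
if (searcher != null) {
    hits = searcher.search(query);
    System.out.println("Found: " + hits.length() + " result(s)!");
    // Hits.doc(i) returns the stored Document; get("path") reads the stored field value
    for (int i = 0; i < hits.length(); i++) {
        Document doc = hits.doc(i);
        System.out.println(doc.get("path"));
    }
}
[/code]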
package lighter.javaeye.com;

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

public class StandardAnalyzerTest {

    // Constructor
    public StandardAnalyzerTest() {
    }

    public static void main(String[] args) {
        // Create a StandardAnalyzer
        Analyzer aAnalyzer = new StandardAnalyzer();
        // The test string
        StringReader sr = new StringReader("lighter javaeye com is the are on");
        // Get a TokenStream for it
        TokenStream ts = aAnalyzer.tokenStream("name", sr);
        try {
            int i = 0;
            Token t = ts.next();
            while (t != null) {
                // Count the tokens as they are printed
                i++;
                // Print the token text after analysis
                System.out.println("Line " + i + ": " + t.termText());
                // Move to the next token
                t = ts.next();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

The output:

Line 1: lighter
Line 2: javaeye
Line 3: com
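Only three tokens come out because StandardAnalyzer removes the English stop words (is, the, are, on). It is also worth trying Chinese input: StandardAnalyzer has no Chinese word segmentation and, as far as its tokenizer goes, is expected to emit one token per Chinese character, which is why the notes at the end of this article recommend a dedicated Chinese analyzer. A sketch reusing the aAnalyzer from the listing, placed inside the same try block (the input string is just an example):

[code]
StringReader sr2 = new StringReader("中华人民共和国");
TokenStream ts2 = aAnalyzer.tokenStream("name", sr2);
Token t2 = ts2.next();
int n = 0;
while (t2 != null) {
    n++;
    // Each Chinese character should come back as its own token
    System.out.println("Token " + n + ": " + t2.termText());
    t2 = ts2.next();
}
[/code]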
package lighter.javaeye.com;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.FSDirectory;

public class FSDirectoryTest {

    // Path where the index is stored
    public static final String path = "c:\\index2";

    public static void main(String[] args) throws Exception {
        Document doc1 = new Document();
        doc1.add(new Field("name", "lighter javaeye com",
                Field.Store.YES, Field.Index.TOKENIZED));

        Document doc2 = new Document();
        doc2.add(new Field("name", "lighter blog",
                Field.Store.YES, Field.Index.TOKENIZED));

        IndexWriter writer = new IndexWriter(FSDirectory.getDirectory(path, true),
                new StandardAnalyzer(), true);
        writer.setMaxFieldLength(3);
        writer.addDocument(doc1);
        writer.setMaxFieldLength(3);
        writer.addDocument(doc2);
        writer.close();

        IndexSearcher searcher = new IndexSearcher(path);
        Hits hits = null;
        Query query = null;
        QueryParser qp = new QueryParser("name", new StandardAnalyzer());

        query = qp.parse("lighter");
        hits = searcher.search(query);
        System.out.println("Searching for \"lighter\": " + hits.length() + " result(s)");

        query = qp.parse("javaeye");
        hits = searcher.search(query);
        System.out.println("Searching for \"javaeye\": " + hits.length() + " result(s)");
    }
}

The output:

Searching for "lighter": 2 result(s)
Searching for "javaeye": 1 result(s)
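The counts are what you would expect: "lighter" occurs in both documents, "javaeye" only in the first. setMaxFieldLength(n) caps how many terms of a field get indexed, and with the value 3 every term of both name fields still fits. As a hedged sketch of what the cap actually does (reusing doc1, doc2 and path from the listing above, run from a main that throws Exception), lowering it to 1 before adding the documents should leave only the first term of each name field in the index:

[code]
IndexWriter writer = new IndexWriter(FSDirectory.getDirectory(path, true),
        new StandardAnalyzer(), true);
writer.setMaxFieldLength(1);   // index at most one term per field
writer.addDocument(doc1);      // only "lighter" survives from "lighter javaeye com"
writer.addDocument(doc2);      // only "lighter" survives from "lighter blog"
writer.close();

IndexSearcher searcher = new IndexSearcher(path);
QueryParser qp = new QueryParser("name", new StandardAnalyzer());
System.out.println(searcher.search(qp.parse("javaeye")).length()); // expected: 0
System.out.println(searcher.search(qp.parse("lighter")).length()); // expected: 2
[/code]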
The FSDirectory factory methods and the IndexWriter constructor involved are:

FSDirectory.getDirectory(File file, boolean create)
FSDirectory.getDirectory(String path, boolean create)
IndexWriter(Directory d, Analyzer a, boolean create)

An index can therefore be kept on disk or entirely in memory:

IndexWriter indexWriter = new IndexWriter(FSDirectory.getDirectory("c:\\index", true), new StandardAnalyzer(), true);
IndexWriter indexWriter = new IndexWriter(new RAMDirectory(), new StandardAnalyzer(), true);

A RAM index can later be merged into a disk index with IndexWriter.addIndexes(Directory[] dirs), as the following pair of methods shows:
public void UniteIndex() throws IOException {
    IndexWriter writerDisk = new IndexWriter(FSDirectory.getDirectory("c:\\indexDisk", true),
            new StandardAnalyzer(), true);
    Document docDisk = new Document();
    docDisk.add(new Field("name", "程序员之家", Field.Store.YES, Field.Index.TOKENIZED));
    writerDisk.addDocument(docDisk);

    RAMDirectory ramDir = new RAMDirectory();
    IndexWriter writerRam = new IndexWriter(ramDir, new StandardAnalyzer(), true);
    Document docRam = new Document();
    docRam.add(new Field("name", "程序员杂志", Field.Store.YES, Field.Index.TOKENIZED));
    writerRam.addDocument(docRam);
    writerRam.close(); // very important -- the RAM writer must be closed before merging

    writerDisk.addIndexes(new Directory[] { ramDir });
    writerDisk.close();
}

public void UniteSearch() throws ParseException, IOException {
    QueryParser queryParser = new QueryParser("name", new StandardAnalyzer());
    Query query = queryParser.parse("程序员");
    IndexSearcher indexSearcher = new IndexSearcher("c:\\indexDisk");
    Hits hits = indexSearcher.search(query);
    System.out.println("Found " + hits.length() + " result(s)");
    for (int i = 0; i < hits.length(); i++) {
        Document doc = hits.doc(i);
        System.out.println(doc.get("name"));
    }
}
The search flow, step by step:

1. new QueryParser(field name, new analyzer)
2. Query query = queryParser.parse("the string to search for"); -- here the reflection API can be used to see what concrete type query actually is (see the sketch after the snippet below)
3. new IndexSearcher(index directory).search(query) returns a Hits object
4. Hits.doc(n) walks through the matching Documents
5. Each Document then gives access to the concrete Field values.
QueryParser queryParser = new QueryParser("name", new StandardAnalyzer());
Query query = queryParser.parse("程序员");
/* the Query returned here is an org.apache.lucene.search.PhraseQuery */
IndexSearcher indexSearcher = new IndexSearcher("c:\\indexDisk");
Hits hits = indexSearcher.search(query);
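To make step 2 concrete, and steps 4 and 5 as well, the snippet can be extended with a reflection check on the parsed query and a loop over the Hits. A minimal sketch:

[code]
// Step 2: print the concrete Query subclass the parser produced; for the
// Chinese query above this should print org.apache.lucene.search.PhraseQuery,
// matching the comment in the snippet
System.out.println(query.getClass().getName());

// Steps 4 and 5: walk the Hits and read the stored Field values
for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    System.out.println(doc.get("name"));
}
[/code]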
1. Sorting search results

First build a small index in which every document carries a single "sort" field:

public void IndexSort() throws IOException {
    IndexWriter writer = new IndexWriter("C:\\indexStore", new StandardAnalyzer(), true);
    // Seven documents whose "sort" field holds the values 1, 4, 3, 5, 9, 6, 7
    String[] values = { "1", "4", "3", "5", "9", "6", "7" };
    for (int i = 0; i < values.length; i++) {
        Document doc = new Document();
        doc.add(new Field("sort", values[i], Field.Store.YES, Field.Index.TOKENIZED));
        writer.addDocument(doc);
    }
    writer.close();
}
The search examples follow:
[code]
public void SearchSort1() throws IOException, ParseException {
    IndexSearcher indexSearcher = new IndexSearcher("C:\\indexStore");
    QueryParser queryParser = new QueryParser("sort", new StandardAnalyzer());
    Query query = queryParser.parse("4");

    Hits hits = indexSearcher.search(query);
    System.out.println("There are " + hits.length() + " result(s)");
    Document doc = hits.doc(0);
    System.out.println(doc.get("sort"));
}

public void SearchSort2() throws IOException, ParseException {
    IndexSearcher indexSearcher = new IndexSearcher("C:\\indexStore");
    // RangeQuery was not covered above; it matches terms between the two
    // bounds (inclusive here). See the API documentation for details.
    Query query = new RangeQuery(new Term("sort", "1"), new Term("sort", "9"), true);
    Hits hits = indexSearcher.search(query,
            new Sort(new SortField("sort", new MySortComparatorSource())));
    System.out.println("There are " + hits.length() + " result(s)");
    for (int i = 0; i < hits.length(); i++) {
        Document doc = hits.doc(i);
        System.out.println(doc.get("sort"));
    }
}

public class MyScoreDocComparator implements ScoreDocComparator {
    private Integer[] sort;

    public MyScoreDocComparator(String s, IndexReader reader, String fieldname)
            throws IOException {
        sort = new Integer[reader.maxDoc()];
        for (int i = 0; i < reader.maxDoc(); i++) {
            Document doc = reader.document(i);
            sort[i] = new Integer(doc.get("sort"));
        }
    }

    public int compare(ScoreDoc i, ScoreDoc j) {
        if (sort[i.doc].intValue() > sort[j.doc].intValue())
            return 1;
        if (sort[i.doc].intValue() < sort[j.doc].intValue())
            return -1;
        return 0;
    }

    public int sortType() {
        return SortField.INT;
    }

    public Comparable sortValue(ScoreDoc i) {
        return sort[i.doc];
    }
}

public class MySortComparatorSource implements SortComparatorSource {
    private static final long serialVersionUID = -9189690812107968361L;

    public ScoreDocComparator newComparator(IndexReader reader, String fieldname)
            throws IOException {
        if (fieldname.equals("sort"))
            return new MyScoreDocComparator("sort", reader, fieldname);
        return null;
    }
}
[/code]
SearchSort1() prints its results unsorted, while SearchSort2() returns them sorted through the custom SortComparatorSource.
2. Multi-field search with MultiFieldQueryParser

If you want to type in keywords without caring which Field they appear in, use MultiFieldQueryParser. Construct it (or call its static parse method) and the rest works the same as searching a single Field:

MultiFieldQueryParser.parse(String[] queries, String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)

The third parameter is the special one, and it is also where Lucene 2.0 differs from the older 1.4.3 API. An example makes it clear:

String[] fields = { "filename", "contents", "description" };
BooleanClause.Occur[] flags = { BooleanClause.Occur.SHOULD,
        BooleanClause.Occur.MUST,       // must occur in this Field
        BooleanClause.Occur.MUST_NOT }; // must not occur in this Field
MultiFieldQueryParser.parse("query", fields, flags, analyzer);
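Putting the pieces together, a small self-contained sketch; the class name, the field names "title"/"body" and the sample text are made up for illustration, only MultiFieldQueryParser.parse and the flags array come from the API shown above:

[code]
package lighter.javaeye.com;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.RAMDirectory;

public class MultiFieldQueryParserTest {
    public static void main(String[] args) throws Exception {
        // Index one document with two fields
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true);
        Document doc = new Document();
        doc.add(new Field("title", "lucene tutorial", Field.Store.YES, Field.Index.TOKENIZED));
        doc.add(new Field("body", "full text search with lucene", Field.Store.YES, Field.Index.TOKENIZED));
        writer.addDocument(doc);
        writer.close();

        // "lucene" may occur in "title" (SHOULD) and must occur in "body" (MUST)
        String[] fields = { "title", "body" };
        BooleanClause.Occur[] flags = { BooleanClause.Occur.SHOULD, BooleanClause.Occur.MUST };
        Query query = MultiFieldQueryParser.parse("lucene", fields, flags, new StandardAnalyzer());

        IndexSearcher searcher = new IndexSearcher(dir);
        Hits hits = searcher.search(query);
        System.out.println("Found " + hits.length() + " result(s)");
    }
}
[/code]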
Some practical notes:

1. A Lucene index should not grow too large or efficiency drops sharply; once it passes about 1 GB you have to start thinking about splitting or distributing the index.
2. Building the index from multiple threads is not recommended; the resulting lock contention is painful, and it is common to find the index locked and impossible to rebuild.
3. Chinese word segmentation is a big problem, and the freely available segmenters all perform poorly. If you can, implement your own segmentation module using the shortest-path segmentation method; tutorials and demo source code are available online.
4. Building incremental indexes is CPU-intensive and can drive CPU idle time to zero under heavy traffic.
5. The default scoring model is not always reasonable and needs to be tuned to your own business.

Overall, Lucene is not easy to use well; its functionality has to be extended in the areas above before it can serve as a commercial search engine.