searchix @ 87f2c60692a7ab6a46b969ddd29e1c97e0cd6722

Search engine for NixOS, nix-darwin, home-manager and NUR users

fix: keep names_with_underscores as single tokens

Fixes: https://codeberg.org/alanpearce/searchix/issues/2
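With the previous camelCase token filter, an attribute segment such as linux_6_15 was split at every underscore and digit boundary (linux, _, 6, _, 15), so the whole name could not be matched as one term. The new nixattr filter keeps such segments intact while still splitting attribute paths on dots and camelCase boundaries; the test cases added below show the expected tokenisation, e.g. linuxKernel.kernels.linux_6_15 → linux, Kernel, ., kernels, ., linux_6_15.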

Alan Pearce
commit 87f2c60692a7ab6a46b969ddd29e1c97e0cd6722
parent 660a81fb1c8f02150f18a143e031844a696e8311

M internal/index/indexer.go
@@ -13,6 +13,7 @@ "slices"
 
     "go.alanpearce.eu/searchix/internal/config"
     "go.alanpearce.eu/searchix/internal/file"
+    "go.alanpearce.eu/searchix/internal/index/nixattr"
     "go.alanpearce.eu/searchix/internal/nix"
     "go.alanpearce.eu/x/log"
     "go.uber.org/zap"
@@ -81,7 +82,7 @@ err = indexMapping.AddCustomAnalyzer("c_name", map[string]any{
         "type":      custom.Name,
         "tokenizer": unicode.Name,
         "token_filters": []string{
-            camelcase.Name,
+            nixattr.Name,
             "ngram",
         },
     })
@@ -101,8 +102,22 @@ if err != nil {
         return nil, errors.WithMessage(err, "could not add custom analyser")
     }
 
+    err = indexMapping.AddCustomAnalyzer("dotted_keyword", map[string]any{
+        "type":      custom.Name,
+        "tokenizer": unicode.Name,
+        "token_filters": []string{
+            nixattr.Name,
+        },
+    })
+    if err != nil {
+        return nil, errors.WithMessage(err, "could not add custom analyser")
+    }
+
     identityFieldMapping := bleve.NewKeywordFieldMapping()
+    attributeFieldMapping := bleve.NewKeywordFieldMapping()
+    attributeFieldMapping.Analyzer = "dotted_keyword"
+
     keywordFieldMapping := bleve.NewKeywordFieldMapping()
     keywordFieldMapping.Analyzer = simple.Name
@@ -122,7 +137,7 @@ locFieldMapping.Store = false
 
     optionMapping := bleve.NewDocumentStaticMapping()
-    optionMapping.AddFieldMappingsAt("Name", identityFieldMapping)
+    optionMapping.AddFieldMappingsAt("Name", attributeFieldMapping)
     optionMapping.AddFieldMappingsAt("NameNGram", nameNGramMapping)
     optionMapping.AddFieldMappingsAt("Source", identityFieldMapping)
     optionMapping.AddFieldMappingsAt("Loc", locFieldMapping)
@@ -136,7 +151,7 @@ packageMapping := bleve.NewDocumentStaticMapping()
     packageMapping.AddFieldMappingsAt("Name", keywordFieldMapping)
     packageMapping.AddFieldMappingsAt("NameNGram", nameNGramMapping)
-    packageMapping.AddFieldMappingsAt("Attribute", keywordFieldMapping)
+    packageMapping.AddFieldMappingsAt("Attribute", attributeFieldMapping)
     packageMapping.AddFieldMappingsAt("AttributeNGram", nameNGramMapping)
     packageMapping.AddFieldMappingsAt("Source", keywordFieldMapping)
     packageMapping.AddFieldMappingsAt("Description", descriptionFieldMapping)
A internal/index/nixattr/nixattr.go
@@ -0,0 +1,82 @@
+// Copyright (c) 2016 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nixattr
+
+import (
+    "bytes"
+    "unicode/utf8"
+
+    "github.com/blevesearch/bleve/v2/analysis"
+    "github.com/blevesearch/bleve/v2/registry"
+)
+
+const Name = "nixAttr"
+
+// Filter splits a given token into a set of tokens where each resulting token
+// falls into one of the following classes:
+//  1. Upper case followed by lower case letters.
+//     Terminated by a number, an upper case letter, and a non alpha-numeric symbol.
+//  2. Upper case followed by upper case letters.
+//     Terminated by a number, an upper case followed by a lower case letter, and a non alpha-numeric symbol.
+//  3. Lower case followed by lower case letters.
+//     Terminated by a number, an upper case letter, and a non alpha-numeric symbol.
+//  4. Number followed by numbers.
+//     Terminated by a letter, and a non alpha-numeric symbol.
+//  5. Non alpha-numeric symbol followed by non alpha-numeric symbols.
+//     Terminated by a number, and a letter.
+//
+// It does a one-time sequential pass over an input token, from left to right.
+// The scan is greedy and generates the longest substring that fits into one of the classes.
+//
+// See the test file for examples of classes and their parsings.
+type Filter struct{}
+
+func NewFilter() *Filter {
+    return &Filter{}
+}
+
+func (f *Filter) Filter(input analysis.TokenStream) analysis.TokenStream {
+    rv := make(analysis.TokenStream, 0, len(input))
+
+    nextPosition := 1
+    for _, token := range input {
+        runeCount := utf8.RuneCount(token.Term)
+        runes := bytes.Runes(token.Term)
+
+        p := NewParser(runeCount, nextPosition, token.Start)
+        for i := 0; i < runeCount; i++ {
+            if i+1 >= runeCount {
+                p.Push(runes[i], nil)
+            } else {
+                p.Push(runes[i], &runes[i+1])
+            }
+        }
+        rv = append(rv, p.FlushTokens()...)
+        nextPosition = p.NextPosition()
+    }
+
+    return rv
+}
+
+func FilterConstructor(
+    _ map[string]any,
+    _ *registry.Cache,
+) (analysis.TokenFilter, error) {
+    return NewFilter(), nil
+}
+
+func init() {
+    registry.RegisterTokenFilter(Name, FilterConstructor)
+}
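To see the filter in isolation, it can be fed a hand-built token stream, much like the helper in the test file below does. A small sketch (the attribute path is made up; as above, the internal import only works from within the searchix module):

// Sketch only: run one term through the nixattr filter and print the
// resulting tokens.
package main

import (
    "fmt"

    "github.com/blevesearch/bleve/v2/analysis"

    "go.alanpearce.eu/searchix/internal/index/nixattr"
)

func main() {
    term := "services.nix_ld.enable" // illustrative attribute path
    input := analysis.TokenStream{
        &analysis.Token{
            Term:     []byte(term),
            Position: 1,
            Start:    0,
            End:      len(term),
        },
    }

    for _, tok := range nixattr.NewFilter().Filter(input) {
        fmt.Printf("%q ", tok.Term)
    }
    fmt.Println()
    // Prints: "services" "." "nix_ld" "." "enable"
    // "nix_ld" stays whole; the dots become separate tokens.
}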
A internal/index/nixattr/nixattr_test.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2016 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nixattr
+
+import (
+    "reflect"
+    "testing"
+
+    "github.com/blevesearch/bleve/v2/analysis"
+)
+
+func TestNixAttrFilter(t *testing.T) {
+
+    tests := []struct {
+        input  analysis.TokenStream
+        output analysis.TokenStream
+    }{
+        {
+            input:  tokenStream(""),
+            output: tokenStream(""),
+        },
+        {
+            input:  tokenStream("a"),
+            output: tokenStream("a"),
+        },
+
+        {
+            input:  tokenStream("linuxKernel.kernels.linux_6_15"),
+            output: tokenStream("linux", "Kernel", ".", "kernels", ".", "linux_6_15"),
+        },
+        {
+            input:  tokenStream("Lang"),
+            output: tokenStream("Lang"),
+        },
+        {
+            input:  tokenStream("GLang"),
+            output: tokenStream("G", "Lang"),
+        },
+        {
+            input:  tokenStream("GOLang"),
+            output: tokenStream("GO", "Lang"),
+        },
+        {
+            input:  tokenStream("GOOLang"),
+            output: tokenStream("GOO", "Lang"),
+        },
+        {
+            input:  tokenStream("1234"),
+            output: tokenStream("1234"),
+        },
+        {
+            input:  tokenStream("slartibartfast"),
+            output: tokenStream("slartibartfast"),
+        },
+    }
+
+    for _, test := range tests {
+        ccFilter := NewFilter()
+        actual := ccFilter.Filter(test.input)
+        if !reflect.DeepEqual(actual, test.output) {
+            t.Errorf("expected %s \n\n got %s", test.output, actual)
+        }
+    }
+}
+
+func tokenStream(termStrs ...string) analysis.TokenStream {
+    tokenStream := make([]*analysis.Token, len(termStrs))
+    index := 0
+    for i, termStr := range termStrs {
+        tokenStream[i] = &analysis.Token{
+            Term:     []byte(termStr),
+            Position: i + 1,
+            Start:    index,
+            End:      index + len(termStr),
+        }
+        index += len(termStr)
+    }
+
+    return analysis.TokenStream(tokenStream)
+}
A internal/index/nixattr/parser.go
@@ -0,0 +1,112 @@
+// Copyright (c) 2016 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nixattr
+
+import (
+    "github.com/blevesearch/bleve/v2/analysis"
+)
+
+func (p *Parser) buildTokenFromTerm(buffer []rune) *analysis.Token {
+    term := analysis.BuildTermFromRunes(buffer)
+    token := &analysis.Token{
+        Term:     term,
+        Position: p.position,
+        Start:    p.index,
+        End:      p.index + len(term),
+    }
+    p.position++
+    p.index += len(term)
+
+    return token
+}
+
+// Parser accepts a symbol and passes it to the current state (representing a class).
+// The state can accept it (and accumulate it). Otherwise, the parser creates a new state that
+// starts with the pushed symbol.
+//
+// Parser accumulates a new resulting token every time it switches state.
+// Use FlushTokens() to get the results after the last symbol was pushed.
+type Parser struct {
+    bufferLen int
+    buffer    []rune
+    current   State
+    tokens    []*analysis.Token
+    position  int
+    index     int
+}
+
+func NewParser(length, position, index int) *Parser {
+    return &Parser{
+        bufferLen: length,
+        buffer:    make([]rune, 0, length),
+        tokens:    make([]*analysis.Token, 0, length),
+        position:  position,
+        index:     index,
+    }
+}
+
+func (p *Parser) Push(sym rune, peek *rune) {
+    switch {
+    case p.current == nil:
+        // the start of parsing
+        p.current = p.NewState(sym)
+        p.buffer = append(p.buffer, sym)
+
+    case p.current.Member(sym, peek):
+        // same state, just accumulate
+        p.buffer = append(p.buffer, sym)
+
+    default:
+        // the old state is no more, thus convert the buffer
+        p.tokens = append(p.tokens, p.buildTokenFromTerm(p.buffer))
+
+        // let the new state begin
+        p.current = p.NewState(sym)
+        p.buffer = make([]rune, 0, p.bufferLen)
+        p.buffer = append(p.buffer, sym)
+    }
+}
+
+// Note. States have to have different starting symbols.
+func (p *Parser) NewState(sym rune) State {
+    var found State
+
+    found = &LowerCaseState{}
+    if found.StartSym(sym) {
+        return found
+    }
+
+    found = &UpperCaseState{}
+    if found.StartSym(sym) {
+        return found
+    }
+
+    found = &NumberCaseState{}
+    if found.StartSym(sym) {
+        return found
+    }
+
+    return &NonAlphaNumericCaseState{}
+}
+
+func (p *Parser) FlushTokens() []*analysis.Token {
+    p.tokens = append(p.tokens, p.buildTokenFromTerm(p.buffer))
+
+    return p.tokens
+}
+
+func (p *Parser) NextPosition() int {
+    return p.position
+}
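The parser is what turns a run of runes into token boundaries: each time the current state rejects a rune, the buffered run is flushed as a token and a new state starts with that rune. A short sketch driving it directly, the same way Filter does (the input string is illustrative; internal import, so searchix-module only):

// Sketch only: push runes through the parser and watch where state
// switches cut token boundaries.
package main

import (
    "fmt"

    "go.alanpearce.eu/searchix/internal/index/nixattr"
)

func main() {
    runes := []rune("GOLang")
    p := nixattr.NewParser(len(runes), 1, 0)
    for i := range runes {
        if i+1 < len(runes) {
            // peek lets UpperCaseState stop before "La", so "Lang" starts fresh
            p.Push(runes[i], &runes[i+1])
        } else {
            p.Push(runes[i], nil)
        }
    }
    for _, tok := range p.FlushTokens() {
        fmt.Printf("%q@%d ", tok.Term, tok.Start)
    }
    fmt.Println()
    // Prints: "GO"@0 "Lang"@2 — matching the GOLang case in the tests.
}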
A internal/index/nixattr/states.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2016 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nixattr
+
+import (
+    "unicode"
+)
+
+// States codify the classes that the parser recognizes.
+type State interface {
+    // is _sym_ the start character
+    StartSym(sym rune) bool
+
+    // is _sym_ a member of a class.
+    // peek, the next sym on the tape, can also be used to determine a class.
+    Member(sym rune, peek *rune) bool
+}
+
+func IsNonSplittingSymbolCase(sym rune) bool {
+    return unicode.IsPunct(sym) && sym != '.'
+}
+
+type LowerCaseState struct{}
+
+func (s *LowerCaseState) Member(sym rune, _ *rune) bool {
+    return unicode.IsLower(sym) || IsNonSplittingSymbolCase(sym) || unicode.IsNumber(sym)
+}
+
+func (s *LowerCaseState) StartSym(sym rune) bool {
+    return s.Member(sym, nil)
+}
+
+type UpperCaseState struct {
+    startedCollecting bool // denotes that the start character has been read
+    collectingUpper   bool // denotes if this is a class of all upper case letters
+}
+
+func (s *UpperCaseState) Member(sym rune, peek *rune) bool {
+    if !unicode.IsLower(sym) && !unicode.IsUpper(sym) {
+        return false
+    }
+
+    if peek != nil && unicode.IsUpper(sym) && unicode.IsLower(*peek) {
+        return false
+    }
+
+    if !s.startedCollecting {
+        // now we have to determine if upper-case letters are collected.
+        s.startedCollecting = true
+        s.collectingUpper = unicode.IsUpper(sym)
+
+        return true
+    }
+
+    return s.collectingUpper == unicode.IsUpper(sym)
+}
+
+func (s *UpperCaseState) StartSym(sym rune) bool {
+    return unicode.IsUpper(sym)
+}
+
+type NumberCaseState struct{}
+
+func (s *NumberCaseState) Member(sym rune, _ *rune) bool {
+    return unicode.IsNumber(sym) || IsNonSplittingSymbolCase(sym) || unicode.IsLower(sym)
+}
+
+func (s *NumberCaseState) StartSym(sym rune) bool {
+    return s.Member(sym, nil)
+}
+
+type NonAlphaNumericCaseState struct{}
+
+func (s *NonAlphaNumericCaseState) Member(sym rune, _ *rune) bool {
+    return !unicode.IsLower(sym) && !unicode.IsUpper(sym) && !unicode.IsNumber(sym) &&
+        !IsNonSplittingSymbolCase(sym)
+}
+
+func (s *NonAlphaNumericCaseState) StartSym(sym rune) bool {
+    return s.Member(sym, nil)
+}
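These states are adapted from bleve's camelCase token filter; the notable change is IsNonSplittingSymbolCase, which lets punctuation other than '.' (underscores, dashes and the like) stay inside the surrounding lower-case or number run, while '.' is still rejected and separates attribute path segments. A quick check of that behaviour (the rune list is arbitrary; internal import, so searchix-module only):

// Sketch only: which runes the lower-case state absorbs vs. rejects.
package main

import (
    "fmt"

    "go.alanpearce.eu/searchix/internal/index/nixattr"
)

func main() {
    s := &nixattr.LowerCaseState{}
    for _, r := range []rune{'_', '-', '.', '3', 'x'} {
        fmt.Printf("%q stays in the current token: %v\n", r, s.Member(r, nil))
    }
    // '_' and '-' are punctuation other than '.', so they do not split;
    // '.' is rejected and becomes its own token via NonAlphaNumericCaseState.
}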