all repos — searchix @ 84f1c93bbfea63b316665dd3a0bd696ad7bb1856

Search engine for NixOS, nix-darwin, home-manager and NUR users

perf: use indexer's batch size for storage export

Alan Pearce
commit

84f1c93bbfea63b316665dd3a0bd696ad7bb1856

parent

bf84c891208a42a2bdd42f21f4190fb965de713a

3 files changed, 20 insertions(+), 6 deletions(-)

changed files
M internal/importer/importer.go
@@ -21,12 +21,21 @@ func (imp *Importer) indexSource(
 	ctx context.Context,
 	source *config.Source,
 ) (bool, error) {
+	writer := imp.options.WriteIndex
 	var exporter func(context.Context) (<-chan nix.Importable, <-chan error)
 	switch source.Importer {
 	case config.Packages:
-		exporter = storage.MakeSourceExporter[nix.Package](imp.options.Storage, source)
+		exporter = storage.MakeSourceExporter[nix.Package](
+			imp.options.Storage,
+			source,
+			writer.GetBatchSize(),
+		)
 	case config.Options:
-		exporter = storage.MakeSourceExporter[nix.Option](imp.options.Storage, source)
+		exporter = storage.MakeSourceExporter[nix.Option](
+			imp.options.Storage,
+			source,
+			writer.GetBatchSize(),
+		)
 	}

 	return pipe(
@@ -34,7 +43,7 @@ func (imp *Importer) indexSource(
 		ctx,
 		imp.options.Logger,
 		exporter,
 		func(ctx context.Context, objects <-chan nix.Importable) <-chan error {
-			return imp.options.WriteIndex.Import(ctx, objects)
+			return writer.Import(ctx, objects)
 		},
 	)
 }
M internal/index/indexer.go
@@ -320,6 +320,10 @@
 		return nil
 	})
 }

+func (i *WriteIndex) GetBatchSize() int {
+	return i.batchSize
+}
+
 func (i *WriteIndex) WithBatch(
 	ctx context.Context,
 	objects <-chan nix.Importable,
M internal/storage/store.go
@@ -209,6 +209,7 @@
 func MakeSourceExporter[T nix.Importable](
 	store *Store,
 	source *config.Source,
+	batchSize int,
 ) func(context.Context) (<-chan nix.Importable, <-chan error) {
 	return func(_ context.Context) (<-chan nix.Importable, <-chan error) {
 		results := make(chan nix.Importable, 1)
@@ -219,7 +220,7 @@
 		defer close(results)
 		defer close(errs)
 		var obj T
-		objs := make([]T, 0, BatchSize)
+		objs := make([]T, 0, batchSize)
 		node := store.From(source.Key)
 		count, err := node.Count(&obj)
 		if err != nil {
@@ -231,8 +232,8 @@
 			return
 		}

-		limit := min(BatchSize, count)
-		for offset := 0; offset < count; offset += BatchSize {
+		limit := min(batchSize, count)
+		for offset := 0; offset < count; offset += batchSize {
 			err := node.All(&objs, storm.Skip(offset), storm.Limit(limit))
 			if err != nil {
 				errs <- fault.Wrap(