diff --git a/rpm/ndb/doc.go b/rpm/ndb/doc.go new file mode 100644 index 000000000..6cf19cdee --- /dev/null +++ b/rpm/ndb/doc.go @@ -0,0 +1,4 @@ +// Package ndb provides support for read-only access to an RPM "ndb" database. +// +// The support for ndb's native indexes is probably unneeded, but is implemented for completeness. +package ndb diff --git a/rpm/ndb/index.go b/rpm/ndb/index.go new file mode 100644 index 000000000..fe77ab5d1 --- /dev/null +++ b/rpm/ndb/index.go @@ -0,0 +1,215 @@ +package ndb + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// Index is an index over an RPM tag. +type Index struct { + // SlotSpace reads the slot section of the Index. + slotSpace *io.SectionReader + // KeySpace reads the key section of the Index. + keySpace *io.SectionReader + // HMask is the mask for hash keys. + hMask uint32 + + indexHeader +} + +// IndexHeader is the header for a tag index. It's meant to be embedded. +type indexHeader struct { + Generation uint32 + NSlots uint32 + UsedSlots uint32 + DummySlots uint32 + XMask uint32 + KeyEnd uint32 + KeyExcess uint32 +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler for an Index header. +func (i *indexHeader) UnmarshalBinary(b []byte) error { + const ( + magic = ('R' | 'p'<<8 | 'm'<<16 | 'I'<<24) + version = 0 + + offsetMagic = 0 + offsetVersion = 4 + offsetGeneration = 8 + offsetNSlots = 12 + offsetUsedSlots = 16 + offsetDummySlots = 20 + offsetXMask = 24 + offsetKeyEnd = 28 + offsetKeyExcess = 32 + offsetObsolete = 36 + ) + if len(b) < 64 { + return io.ErrShortBuffer + } + if le.Uint32(b[offsetMagic:]) != magic { + return errors.New("ndb: index: bad magic") + } + if le.Uint32(b[offsetVersion:]) != version { + return errors.New("ndb: index: bad version") + } + i.Generation = le.Uint32(b[offsetGeneration:]) + i.NSlots = le.Uint32(b[offsetNSlots:]) + i.UsedSlots = le.Uint32(b[offsetUsedSlots:]) + i.DummySlots = le.Uint32(b[offsetDummySlots:]) + i.XMask = le.Uint32(b[offsetXMask:]) + i.KeyEnd = le.Uint32(b[offsetKeyEnd:]) + i.KeyExcess = le.Uint32(b[offsetKeyExcess:]) + // 4 bytes "obsolete" + // 24 bytes padding + return nil +} + +// IndexPair is the package index and data offset. +type IndexPair struct { + Package uint32 + Data uint32 +} + +// Lookup returns the pair (if any) for the provided key. +func (i *Index) Lookup(s string) (pg []IndexPair, err error) { + // NOTE(hank) This is a pretty straight forward port of the C version. 
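+	//
+	// The slots form an open-addressed hash table: probing starts at
+	// keyh&hMask and advances by an increasing step; a slot of all ones is
+	// skipped, and an empty (zero) slot ends the probe.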
+	const (
+		slotSize = 8
+		skip     = ^uint32(0)
+
+		offsetKey    = 0
+		offsetOffset = 4
+	)
+	var keyoff, x uint32
+	keyh := murmur(s)
+	b := make([]byte, slotSize)
+Look:
+	for h, hh := keyh&i.hMask, uint32(7); ; h, hh = (h+hh)&i.hMask, hh+1 {
+		off := int64(8 * h)
+		if _, err := i.slotSpace.ReadAt(b, off); err != nil {
+			return pg, fmt.Errorf("ndb: index: failed to read slot@0x%08x: %w", off, err)
+		}
+		x = le.Uint32(b)
+		switch {
+		case x == 0:
+			break Look
+		case x == skip:
+			continue
+		}
+		if keyoff == 0 {
+			switch {
+			case ((x ^ keyh) & i.XMask) != 0:
+				continue
+			case !i.equalkey(x & ^i.XMask, s):
+				continue
+			}
+			keyoff = x
+		}
+		if keyoff != x {
+			continue
+		}
+		data := le.Uint32(b[offsetOffset:])
+		var ovldata uint32
+		// If flagged for overflow, read the overflow segment, which follows
+		// the 8-byte slots:
+		if data&0x80000000 != 0 {
+			off = int64(i.NSlots)*slotSize + 4*int64(h)
+			if _, err := i.slotSpace.ReadAt(b[:4], off); err != nil {
+				return pg, fmt.Errorf("ndb: index: failed to read overflow slot@0x%08x: %w", off, err)
+			}
+			ovldata = le.Uint32(b)
+		}
+		pg = append(pg, i.decodeData(data, ovldata))
+	}
+	return pg, nil
+}
+
+func (i *Index) equalkey(keyoff uint32, s string) bool {
+	if int64(keyoff)+int64(len(s))+1 > i.keySpace.Size() {
+		return false
+	}
+	l := len(s)
+	var b []byte
+	switch {
+	case l < 255:
+		b = make([]byte, 1+l)
+	case l < 65535:
+		b = make([]byte, 3+l)
+	default:
+		b = make([]byte, 7+l)
+	}
+	n, _ := i.keySpace.ReadAt(b, int64(keyoff))
+	b = b[:n]
+	switch {
+	case l < 255:
+		if b[0] != uint8(l) {
+			return false
+		}
+		b = b[1:]
+	case l < 65535:
+		if b[0] != 255 || le.Uint16(b[1:]) != uint16(l) {
+			return false
+		}
+		b = b[3:]
+	default:
+		if b[0] != 255 || b[1] != 255 || b[2] != 255 || le.Uint32(b[3:]) != uint32(l) {
+			return false
+		}
+		b = b[7:]
+	}
+	return bytes.Equal([]byte(s), b)
+}
+
+func (i *Index) decodeData(data, ovldata uint32) (t IndexPair) {
+	switch {
+	case (data & 0x80000000) != 0:
+		t.Data = data ^ 0x80000000
+		t.Package = ovldata
+	case (data & 0x40000000) != 0:
+		t.Data = (data ^ 0x40000000) >> 24
+		t.Package = data & 0xffffff
+	default:
+		t.Data = data >> 20
+		t.Package = data & 0xfffff
+	}
+	return t
+}
+
+func (i *Index) encodeData(pkgIdx, datIdx uint32) (data, ovldata uint32) {
+	switch {
+	case (pkgIdx < 0x100000 && datIdx < 0x400):
+		ovldata = 0
+		data = pkgIdx | datIdx<<20
+	case (pkgIdx < 0x1000000 && datIdx < 0x40):
+		ovldata = 0
+		data = pkgIdx | datIdx<<24 | 0x40000000
+	default:
+		ovldata = pkgIdx
+		data = datIdx | 0x80000000
+	}
+	return data, ovldata
+}
+
+// Parse closes over the provided [io.ReaderAt] and populates the Index.
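+//
+// The reader is expected to begin at the index header: a 64-byte header,
+// followed by the slot space (12 bytes per slot) and then the key space.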
+func (i *Index) Parse(r io.ReaderAt) error { + const ( + indexSlotOffset = 64 + indexKeyChunksize = 4096 + ) + b := make([]byte, indexSlotOffset) + if _, err := r.ReadAt(b, 0); err != nil { + return fmt.Errorf("ndb: index: unable to read bytes: %w", err) + } + if err := i.indexHeader.UnmarshalBinary(b); err != nil { + return fmt.Errorf("ndb: index: unable to unmarshal header: %w", err) + } + + i.hMask = i.NSlots - 1 + i.slotSpace = io.NewSectionReader(r, indexSlotOffset, int64(i.NSlots)*12) + i.keySpace = io.NewSectionReader(r, indexSlotOffset+(int64(i.NSlots)*12), int64(i.KeyEnd)) + + return nil +} diff --git a/rpm/ndb/index_test.go b/rpm/ndb/index_test.go new file mode 100644 index 000000000..863b5f7d0 --- /dev/null +++ b/rpm/ndb/index_test.go @@ -0,0 +1,32 @@ +package ndb + +import ( + "os" + "testing" + + "github.com/quay/claircore/rpm/internal/rpm" +) + +func TestLoadIndex(t *testing.T) { + idxf, err := os.Open("testdata/Index.db") + if err != nil { + t.Fatal(err) + } + defer idxf.Close() + var xdb XDB + if err := xdb.Parse(idxf); err != nil { + t.Fatal(err) + } + idx, err := xdb.Index(rpm.TagName) + if err != nil { + t.Fatal(err) + } + p, err := idx.Lookup("filesystem") + if err != nil { + t.Fatal(err) + } + t.Logf("%+#v", p) + if p[0].Package != 3 { + t.Fail() + } +} diff --git a/rpm/ndb/murmur.go b/rpm/ndb/murmur.go new file mode 100644 index 000000000..5cadf63d3 --- /dev/null +++ b/rpm/ndb/murmur.go @@ -0,0 +1,29 @@ +package ndb + +// This is a port of the rpm murmur hash, which uses a single constant rather than a few of them. +func murmur(s string) (h uint32) { + const m = 0x5bd1e995 + h = uint32(len(s) * m) + for ; len(s) >= 4; s = s[4:] { + h += uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24 + h *= m + h ^= h >> 16 + } + switch len(s) { + case 3: + h += uint32(s[2]) << 16 + fallthrough + case 2: + h += uint32(s[1]) << 8 + fallthrough + case 1: + h += uint32(s[0]) + h *= m + h ^= h >> 16 + } + h *= m + h ^= h >> 10 + h *= m + h ^= h >> 17 + return h +} diff --git a/rpm/ndb/murmur_test.go b/rpm/ndb/murmur_test.go new file mode 100644 index 000000000..8b6974dda --- /dev/null +++ b/rpm/ndb/murmur_test.go @@ -0,0 +1,8 @@ +package ndb + +import "testing" + +func TestMurmur(t *testing.T) { + x := "file-magic" + t.Logf("%s\t%08x", x, murmur(x)) +} diff --git a/rpm/ndb/ndb.go b/rpm/ndb/ndb.go new file mode 100644 index 000000000..60604e723 --- /dev/null +++ b/rpm/ndb/ndb.go @@ -0,0 +1,164 @@ +package ndb + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/quay/claircore/rpm/internal/rpm" +) + +var le = binary.LittleEndian + +// Used throughout the various DBs. +const ( + slotSize = 4 * 4 + slotStart = 2 +) + +// CheckMagic reports whether the Reader starts with a magic header for +// a file format supported by this package. +func CheckMagic(ctx context.Context, r io.Reader) bool { + const ( + xdb = 'R' | 'p'<<8 | 'm'<<16 | 'X'<<24 + pkg = 'R' | 'p'<<8 | 'm'<<16 | 'P'<<24 + ) + b := make([]byte, 4) + if _, err := io.ReadFull(r, b); err != nil { + return false + } + m := le.Uint32(b) + return m == xdb || m == pkg +} + +// XDB is the "xdb" a.k.a. "Index.db", the ndb mechanism for creating indexes. +type XDB struct { + r io.ReaderAt + lookup map[rpm.Tag]*xdbSlot + slot []xdbSlot + xdbHeader +} + +// Parse closes over the passed [io.ReaderAt] and populates the XDB. 
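+//
+// The header is read from the first 32 bytes, then the slot pages
+// (PageSize * SlotNPages bytes) are scanned for tag slots until a zero or
+// invalid tag is found.
+//
+// A lookup against an index database looks roughly like the following
+// sketch (setup and error handling elided; the tag and key are examples):
+//
+//	var xdb XDB
+//	if err := xdb.Parse(f); err != nil { /* handle */ }
+//	idx, err := xdb.Index(rpm.TagName)
+//	pairs, err := idx.Lookup("filesystem")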
+func (db *XDB) Parse(r io.ReaderAt) error {
+	const headerSize = 32
+	h := make([]byte, headerSize)
+	if _, err := r.ReadAt(h, 0); err != nil {
+		return fmt.Errorf("xdb: unable to read header: %w", err)
+	}
+	if err := db.xdbHeader.UnmarshalBinary(h); err != nil {
+		return fmt.Errorf("xdb: bad header: %w", err)
+	}
+	pg := make([]byte, db.PageSize*db.SlotNPages)
+	if _, err := r.ReadAt(pg, 0); err != nil {
+		return fmt.Errorf("xdb: unable to read slots: %w", err)
+	}
+
+	// Maximum number of slots that fit in the mapped pages, less the
+	// reserved header slots.
+	max := (len(pg) / slotSize) - slotStart
+	db.lookup = make(map[rpm.Tag]*xdbSlot, max)
+	db.slot = make([]xdbSlot, max)
+	n := 0
+	var x *xdbSlot
+	for off := slotStart * slotSize; n < max; n, off = n+1, off+slotSize {
+		x = &db.slot[n]
+		if err := x.UnmarshalBinary(pg[off:]); err != nil {
+			return err
+		}
+		if x.Tag == 0 || x.Tag == rpm.TagInvalid {
+			break
+		}
+		db.lookup[x.Tag] = x
+	}
+	db.slot = db.slot[:n]
+	db.r = r
+	return nil
+}
+
+// Index reports the index for the specified tag.
+func (db *XDB) Index(tag rpm.Tag) (*Index, error) {
+	slot, ok := db.lookup[tag]
+	if !ok {
+		return nil, fmt.Errorf("ndb: no such tag %d", tag)
+	}
+	off, ct := int64(slot.StartPage*db.PageSize), int64(slot.PageCount*db.PageSize)
+	r := io.NewSectionReader(db.r, off, ct)
+	var idx Index
+	if err := idx.Parse(r); err != nil {
+		return nil, err
+	}
+	return &idx, nil
+}
+
+type xdbHeader struct {
+	Version        uint32
+	Generation     uint32
+	SlotNPages     uint32
+	PageSize       uint32
+	UserGeneration uint32
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler for the xdb header.
+func (h *xdbHeader) UnmarshalBinary(b []byte) error {
+	const (
+		headerSz = 32
+		magic    = 'R' | 'p'<<8 | 'm'<<16 | 'X'<<24
+		version  = 0
+
+		offsetMagic          = 0
+		offsetVersion        = 4
+		offsetGeneration     = 8
+		offsetSlotNPages     = 12
+		offsetPageSize       = 16
+		offsetUserGeneration = 20
+	)
+
+	if len(b) < headerSz {
+		return io.ErrShortBuffer
+	}
+	if le.Uint32(b[offsetMagic:]) != magic {
+		return errors.New("xdb: bad magic")
+	}
+	h.Version = le.Uint32(b[offsetVersion:])
+	if h.Version != version {
+		return errors.New("xdb: bad version")
+	}
+	h.Generation = le.Uint32(b[offsetGeneration:])
+	h.SlotNPages = le.Uint32(b[offsetSlotNPages:])
+	h.PageSize = le.Uint32(b[offsetPageSize:])
+	h.UserGeneration = le.Uint32(b[offsetUserGeneration:])
+	return nil
+}
+
+type xdbSlot struct {
+	Subtag    uint8
+	Tag       rpm.Tag
+	StartPage uint32
+	PageCount uint32
+}
+
+func (s *xdbSlot) UnmarshalBinary(b []byte) error {
+	const (
+		magic     = ('S' | 'l'<<8 | 'o'<<16 | 0x00<<24)
+		magicMask = ^uint32(0xFF << 24)
+
+		magicOffset  = 0
+		subtagOffset = 3
+		tagOffset    = 4
+		startOffset  = 8
+		countOffset  = 12
+	)
+	if len(b) < slotSize {
+		return io.ErrShortBuffer
+	}
+	if le.Uint32(b[magicOffset:])&magicMask != magic {
+		return fmt.Errorf("slot: bad magic")
+	}
+	s.Subtag = b[subtagOffset]
+	s.Tag = rpm.Tag(le.Uint32(b[tagOffset:]))
+	s.StartPage = le.Uint32(b[startOffset:])
+	s.PageCount = le.Uint32(b[countOffset:])
+	return nil
+}
diff --git a/rpm/ndb/package.go b/rpm/ndb/package.go
new file mode 100644
index 000000000..d7a521c99
--- /dev/null
+++ b/rpm/ndb/package.go
@@ -0,0 +1,279 @@
+package ndb
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"hash/adler32"
+	"io"
+)
+
+// Pages are hard-coded to 4096 bytes for the Package database; this
+// is different from the Index database, which could have variable page
+// sizes.
+
+// PackageDB is the "pkgdb" a.k.a. "Packages.db", the raw package data.
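+//
+// A typical use, sketched with setup and error handling elided, is to Parse
+// the database and feed each returned reader to an rpm header parser:
+//
+//	var db PackageDB
+//	if err := db.Parse(f); err != nil { /* handle */ }
+//	rds, err := db.AllHeaders(ctx)
+//	for _, rd := range rds {
+//		var h rpm.Header
+//		if err := h.Parse(ctx, rd); err != nil { /* handle */ }
+//	}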
+type PackageDB struct {
+	r      io.ReaderAt
+	lookup map[uint32]*pkgSlot
+	slot   []pkgSlot
+	pkgHeader
+}
+
+// Parse closes over the provided [io.ReaderAt] and populates the PackageDB.
+func (db *PackageDB) Parse(r io.ReaderAt) error {
+	const (
+		headerSz = 4 * 8
+		pageSz   = 4096
+	)
+
+	// Read and verify the header.
+	b := make([]byte, headerSz)
+	if _, err := r.ReadAt(b, 0); err != nil {
+		return fmt.Errorf("ndb: package: unable to read header: %w", err)
+	}
+	if err := db.pkgHeader.UnmarshalBinary(b); err != nil {
+		return fmt.Errorf("ndb: package: unable to unmarshal header: %w", err)
+	}
+
+	// Package count should be contiguous.
+	ct := int(db.NextPkgIdx - 1)
+	db.lookup = make(map[uint32]*pkgSlot, ct)
+	db.slot = make([]pkgSlot, 0, ct)
+	b = b[:slotSize]
+	// Read every populated slot (these should be contiguous) and populate the lookup table.
+	for i, off := 0, int64(slotStart*slotSize); i < ct; i, off = i+1, off+slotSize {
+		if _, err := r.ReadAt(b, off); err != nil {
+			return fmt.Errorf("ndb: package: unable to read slot %d: %w", i, err)
+		}
+		db.slot = append(db.slot, pkgSlot{})
+		x := &db.slot[i]
+		if err := x.UnmarshalBinary(b); err != nil {
+			return fmt.Errorf("ndb: package: slot %d: unexpected error: %w", i, err)
+		}
+		db.lookup[x.Index] = x
+	}
+	db.r = r
+
+	return nil
+}
+
+// AllHeaders returns ReaderAts for all RPM headers in the PackageDB.
+func (db *PackageDB) AllHeaders(_ context.Context) ([]io.ReaderAt, error) {
+	r := make([]io.ReaderAt, int(db.NextPkgIdx)-1)
+	var err error
+	for i := uint32(1); i < db.NextPkgIdx && err == nil; i++ {
+		r[int(i-1)], err = db.GetHeader(i)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// GetHeader returns an [io.ReaderAt] populated with [rpm.Header] data or
+// reports an error.
+func (db *PackageDB) GetHeader(pkgID uint32) (io.ReaderAt, error) {
+	const (
+		headerSize  = 4 * 4
+		trailerSize = 4 * 3
+	)
+	// Lookup offset and count.
+	blob, ok := db.lookup[pkgID]
+	if !ok {
+		return nil, fmt.Errorf("ndb: package: package id %d does not exist", pkgID)
+	}
+
+	// Read and verify header.
+	b := make([]byte, headerSize)
+	if _, err := db.r.ReadAt(b, blob.Offset()); err != nil {
+		return nil, fmt.Errorf("ndb: package: error reading header: %w", err)
+	}
+	var bh blobHeader
+	if err := bh.UnmarshalBinary(b); err != nil {
+		return nil, fmt.Errorf("ndb: package: bad header: %w", err)
+	}
+	if bh.Package != pkgID {
+		return nil, fmt.Errorf("ndb: package: martian blob")
+	}
+
+	// Read and verify trailer.
+	if _, err := db.r.ReadAt(b[:trailerSize], blob.Offset()+blob.Count()-trailerSize); err != nil {
+		return nil, fmt.Errorf("ndb: package: error reading trailer: %w", err)
+	}
+	var bt blobTrailer
+	if err := bt.UnmarshalBinary(b); err != nil {
+		return nil, fmt.Errorf("ndb: package: bad trailer: %w", err)
+	}
+	if bt.Len != bh.Len {
+		return nil, fmt.Errorf("ndb: package: header/trailer length mismatch")
+	}
+	// The checksum covers the blob header and data, including any block
+	// padding, unlike the reader returned below.
+	h := adler32.New()
+	rd := io.NewSectionReader(db.r, blob.Offset(), blob.Count()-trailerSize)
+	if _, err := io.Copy(h, rd); err != nil {
+		return nil, fmt.Errorf("ndb: package: error computing checksum: %w", err)
+	}
+	if got, want := h.Sum32(), bt.Checksum; got != want {
+		return nil, fmt.Errorf("ndb: package: checksum mismatch; got: 0x%08x, want: 0x%08x", got, want)
+	}
+
+	return io.NewSectionReader(db.r, blob.Offset()+headerSize, int64(bh.Len)), nil
+}
+
+// PkgHeader is the header for the PackageDB. It's meant to be embedded.
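+//
+// On disk, the header starts with the "RpmP" magic and a version, followed
+// by the generation, page count, and next package index, all little-endian
+// uint32s.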
+type pkgHeader struct { + Generation uint32 + NPages uint32 + NextPkgIdx uint32 +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler for a PackageDB header. +func (h *pkgHeader) UnmarshalBinary(b []byte) error { + const ( + magic = 'R' | 'p'<<8 | 'm'<<16 | 'P'<<24 + version = 0 + + magicOffset = 0 + versionOffset = 4 + generationOffset = 8 + nPagesOffset = 12 + nextPkgIndexOffset = 16 + ) + if len(b) < 32 { + return io.ErrShortBuffer + } + if le.Uint32(b[magicOffset:]) != magic { + return fmt.Errorf("ndb: package: bad header: bad magic") + } + if le.Uint32(b[versionOffset:]) != version { + return fmt.Errorf("ndb: package: bad header: bad version") + } + + h.Generation = le.Uint32(b[generationOffset:]) + h.NPages = le.Uint32(b[nPagesOffset:]) + h.NextPkgIdx = le.Uint32(b[nextPkgIndexOffset:]) + + return nil +} + +// PkgSlot is a decoded package slot. +type pkgSlot struct { + Index uint32 + blkOffset uint32 + blkCount uint32 +} + +// BlockSize is the size of a blob block. +// +// Blobs are denominated and allocated in blocks. +const blockSize = 16 + +func (s *pkgSlot) GoString() string { + return fmt.Sprintf("blob@%08x[%08x]", s.blkOffset*blockSize, s.blkCount*blockSize) +} + +// Offset reports the byte offset indicated by the slot. +func (s *pkgSlot) Offset() int64 { return int64(s.blkOffset) * blockSize } + +// Count reports the length in bytes of the data in the slot. +func (s *pkgSlot) Count() int64 { return int64(s.blkCount) * blockSize } + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *pkgSlot) UnmarshalBinary(b []byte) error { + const ( + magic = ('S' | 'l'<<8 | 'o'<<16 | 't'<<24) + + magicOffset = 0 + slotIdxOffset = 4 + slotOffsetOffset = 8 + slotCountOffset = 12 + + headerSize = 4 * 4 + trailerSize = 3 * 4 + ) + if len(b) < slotSize { + return io.ErrShortBuffer + } + if le.Uint32(b[magicOffset:]) != magic { + return fmt.Errorf("slot: bad magic") + } + s.Index = le.Uint32(b[slotIdxOffset:]) + s.blkOffset = le.Uint32(b[slotOffsetOffset:]) + s.blkCount = le.Uint32(b[slotCountOffset:]) + // Double-check the blob size. + if s.blkCount < ((headerSize + trailerSize + blockSize - 1) / blockSize) { + return fmt.Errorf("slot: nonsense block count (%d)", s.blkCount) + } + + return nil +} + +// BlobHeader is the header for a blob. +type blobHeader struct { + Package uint32 + Generation uint32 + Len uint32 +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (h *blobHeader) UnmarshalBinary(b []byte) error { + const ( + magic = ('B' | 'l'<<8 | 'b'<<16 | 'S'<<24) + minSize = 4 * 4 + + offsetMagic = 0 + offsetPackage = 4 + offsetGeneration = 8 + offsetLength = 12 + ) + + if len(b) < minSize { + return io.ErrShortBuffer + } + if le.Uint32(b[offsetMagic:]) != magic { + return errors.New("blob: header: bad magic") + } + h.Package = le.Uint32(b[offsetPackage:]) + h.Generation = le.Uint32(b[offsetGeneration:]) + h.Len = le.Uint32(b[offsetLength:]) + + return nil +} + +// BlockCount reports the number of 16-byte blocks this blob occupies. +func (h *blobHeader) BlockCount() uint32 { + const ( + headerSize = 4 * 4 + trailerSize = 3 * 4 + ) + return ((headerSize + h.Len + trailerSize + blockSize) - 1) / blockSize +} + +// BlobTrailer is the trailer (a.k.a. "tail") of a blob. +type blobTrailer struct { + Checksum uint32 + Len uint32 +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
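+// The trailer is three little-endian uint32s: the checksum, the length, and
+// the "BlbE" magic.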
+func (t *blobTrailer) UnmarshalBinary(b []byte) error { + const ( + magic = ('B' | 'l'<<8 | 'b'<<16 | 'E'<<24) + minSize = 3 * 4 + + offsetChecksum = 0 + offsetLength = 4 + offsetMagic = 8 + ) + + if len(b) < minSize { + return io.ErrShortBuffer + } + if le.Uint32(b[offsetMagic:]) != magic { + return errors.New("blob: trailer: bad magic") + } + t.Checksum = le.Uint32(b[offsetChecksum:]) + t.Len = le.Uint32(b[offsetLength:]) + return nil +} diff --git a/rpm/ndb/package_test.go b/rpm/ndb/package_test.go new file mode 100644 index 000000000..681e99b1c --- /dev/null +++ b/rpm/ndb/package_test.go @@ -0,0 +1,42 @@ +package ndb + +import ( + "context" + "os" + "testing" + + "github.com/quay/claircore/rpm/internal/rpm" +) + +func TestLoadPackage(t *testing.T) { + ctx := context.Background() + pkgf, err := os.Open("testdata/Packages.db") + if err != nil { + t.Fatal(err) + } + defer pkgf.Close() + var pkg PackageDB + if err := pkg.Parse(pkgf); err != nil { + t.Fatal(err) + } + rds, err := pkg.AllHeaders(ctx) + if err != nil { + t.Fatal(err) + } + for _, rd := range rds { + var h rpm.Header + if err := h.Parse(ctx, rd); err != nil { + t.Fatal(err) + } + var found bool + for i := range h.Infos { + if h.Infos[i].Tag == rpm.TagName { + found = true + break + } + } + if !found { + t.Error(`missing "name" tag`) + } + } +} diff --git a/rpm/ndb/testdata/Index.db b/rpm/ndb/testdata/Index.db new file mode 100644 index 000000000..ccc647631 Binary files /dev/null and b/rpm/ndb/testdata/Index.db differ diff --git a/rpm/ndb/testdata/Packages.db b/rpm/ndb/testdata/Packages.db new file mode 100644 index 000000000..233f9289e Binary files /dev/null and b/rpm/ndb/testdata/Packages.db differ