Commit

Operations done next is test
Rohit-Karki committed Nov 30, 2023
1 parent 8ccf383 commit ad0ecaf
Showing 4 changed files with 123 additions and 3 deletions.
Empty file added .github/workflows/release.yml
104 changes: 103 additions & 1 deletion Bitcaspy.go
@@ -78,6 +78,17 @@ func Init(cfg ...Config) (*BitCaspy, error) {
}
}

// If not in read-only mode, create a lock file to ensure that only one process can access the active datafile
if !opts.readOnly {
lockFilePath := filepath.Join(opts.dir, LOCKFILE)
if !exists(lockFilePath) {
_, err := getFLock(lockFilePath)
if err != nil {
return nil, err
}
}
}

// Create a new active datafile
df, err := datafile.New(opts.dir, index)
if err != nil {
@@ -113,8 +124,99 @@ func Init(cfg ...Config) (*BitCaspy, error) {
go BitCaspy.checkFileSize(BitCaspy.opts.checkFileSizeInterval)

// if BitCaspy.opts.syncInterval != nil{
// go BitCaspy
// go BitCaspy.syn
// }

return BitCaspy, nil
}
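
For context, getFLock is not part of this commit. Below is a minimal sketch of what such a helper typically does on Unix, assuming it wraps an advisory flock(2) lock; the signature, return value, and file permissions are assumptions, not the repository's actual code.

package bitcaspy

import (
	"fmt"
	"os"
	"syscall"
)

// getFLock (sketch only): open or create the lockfile and take an exclusive,
// non-blocking advisory lock so a second process fails fast instead of
// corrupting the active datafile.
func getFLock(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		return nil, err
	}
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		f.Close()
		return nil, fmt.Errorf("%w: %v", ErrLocked, err)
	}
	return f, nil
}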

func (b *BitCaspy) Close() error {
b.Lock()
defer b.Unlock()

// Generate Hint files from the keydir
if err := b.genrateHintFiles(); err != nil {
return fmt.Errorf("error generating hint files from keydir: %v", err)
}

// Close the active data file
if err := b.df.Close(); err != nil {
return fmt.Errorf("error closing active data file: %v", err)
}

// Close all the stale data files
for _, df := range b.stale {
if err := df.Close(); err != nil {
return fmt.Errorf("error closing stale data file: %v", err)
}
}
return nil
}

// Get looks the key up in the keydir hashmap, then reads the record at the
// recorded offset in the corresponding data file and validates it
func (b *BitCaspy) Get(key string) ([]byte, error) {
record, err := b.get(key)
if err != nil {
return nil, err
}
if record.isExpired() {
return nil, ErrExpiredKey
}
if !record.isValidChecksum() {
return nil, ErrChecksumMismatch
}
return record.Value, nil
}

// Put writes the key and value to the active data file and records the file id, value size and value offset for the key in the keyDir hashmap
func (b *BitCaspy) Put(key string, value []byte) error {
if b.opts.readOnly {
return ErrReadOnly
}
return b.put(b.df, key, value, nil)
}

func (b *BitCaspy) Delete(key string) error {
b.Lock()
defer b.Unlock()
if b.opts.readOnly {
return ErrReadOnly
}
return b.delete(key)
}

func (b *BitCaspy) list_keys() []string {
b.Lock()
defer b.Unlock()
key_lists := make([]string, 0, len(b.KeyDir))

for key := range b.KeyDir {
key_lists = append(key_lists, key)
}
return key_lists
}

func (b *BitCaspy) Fold(foldingFunc func(key string, value []byte, acc string) error) error {
b.Lock()
defer b.Unlock()

for key := range b.KeyDir {
value, err := b.Get(key)
if err != nil {
return err
}
if err := foldingFunc(key, value, "rohit"); err != nil {
return err
}
}
return nil
}
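
As an aside, here is a small sketch of how Fold might be used, written as an illustrative unexported method inside the package; the name dumpAll is made up and is not part of this commit. The acc argument is ignored because Fold above always passes a fixed string for it.

package bitcaspy

import "fmt"

// dumpAll (illustrative only): use Fold to visit every live key/value pair.
func (b *BitCaspy) dumpAll() error {
	return b.Fold(func(key string, value []byte, acc string) error {
		fmt.Printf("%s = %s\n", key, value)
		return nil
	})
}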

func (b *BitCaspy) Sync() error {
b.Lock()
defer b.Unlock()

return b.df.Sync()
}
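
Taken together, Init, Put, Get, Delete and Close form the store's public surface. A minimal usage sketch follows, assuming default options and an assumed module path, since neither appears in this commit.

package main

import (
	"fmt"
	"log"

	bitcaspy "github.com/Rohit-Karki/bitcaspy" // assumed import path
)

func main() {
	// Init with no Config uses the library defaults; the option fields are
	// not part of this commit.
	db, err := bitcaspy.Init()
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put("greeting", []byte("hello")); err != nil {
		log.Fatal(err)
	}

	val, err := db.Get("greeting")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(val))

	if err := db.Delete("greeting"); err != nil {
		log.Fatal(err)
	}
}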
17 changes: 17 additions & 0 deletions erros.go
@@ -0,0 +1,17 @@
package bitcaspy

import "errors"

var (
ErrLocked = errors.New("a lockfile already exists")
ErrReadOnly = errors.New("operation not allowed in read only mode")

ErrChecksumMismatch = errors.New("invalid data: checksum does not match")

ErrEmptyKey = errors.New("invalid key: key cannot be empty")
ErrExpiredKey = errors.New("invalid key: key is already expired")
ErrLargeKey = errors.New("invalid key: size cannot be more than 4294967296 bytes")
ErrNoKey = errors.New("invalid key: key is either deleted or expired or unset")

ErrLargeValue = errors.New("invalid value: size cannot be more than 4294967296 bytes")
)
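
These sentinel errors are returned unwrapped by Get and get, so callers can match them with errors.Is. A small sketch follows; the helper name describeGetError is made up for illustration and is not part of this commit.

package bitcaspy

import (
	"errors"
	"fmt"
)

// describeGetError (illustrative only): distinguish the sentinel errors
// defined above using errors.Is.
func describeGetError(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, ErrNoKey):
		return "key was deleted, expired, or never set"
	case errors.Is(err, ErrExpiredKey):
		return "key has expired"
	case errors.Is(err, ErrChecksumMismatch):
		return "stored record failed its checksum"
	default:
		return fmt.Sprintf("read failed: %v", err)
	}
}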
5 changes: 3 additions & 2 deletions ops.go
@@ -12,14 +12,15 @@ import (
func (b *BitCaspy) get(key string) (Record, error) {
meta, ok := b.KeyDir[key]
if !ok {
- return Record{}, nil
+ return Record{}, ErrNoKey
}

var (
Header Header
reader *datafile.DataFile
)
reader = b.df
// If the record is not in the active data file, look it up in the stale data files
if meta.id != b.df.ID() {
reader, ok = b.stale[meta.id]
if !ok {
@@ -29,7 +30,7 @@ func (b *BitCaspy) get(key string) (Record, error) {

data, err := reader.Read(meta.value_pos, meta.value_sz)
if err != nil {
- return Record{}, fmt.Errorf("Error reading the dat from database file %v", err)
+ return Record{}, fmt.Errorf("error reading the data from the database file: %v", err)
}

//Decode the header
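
For reference, the keydir entry type is not part of this commit. Below is a plausible sketch of the metadata that get() relies on; the field names are taken from the code above, while the type name, field types, and anything else are assumptions.

package bitcaspy

// Meta (sketch only): a keydir entry telling get() which datafile holds the
// record, where it starts, and how many bytes to read.
type Meta struct {
	id        int // ID of the datafile holding the record
	value_sz  int // size in bytes of the encoded record
	value_pos int // byte offset of the record within that datafile
}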
