
Commit: Initial Commit
Rohit-Karki committed Nov 29, 2023
1 parent 53e13f0 commit fac23ed
Showing 6 changed files with 81 additions and 37 deletions.
25 changes: 12 additions & 13 deletions main.go → Bitcaspy.go
@@ -1,12 +1,8 @@
package bitcaspy

import (
"bytes"
"fmt"
"os"
"path/filepath"
"sync"
"time"

"github.com/zerodha/logf"
datafile "rohit.com/internal"
@@ -17,16 +13,16 @@ const (
HINTS_FILE = "bitcaspy.hints"
)

type BitCaspy struct{
type BitCaspy struct {
sync.RWMutex

lo logf.Logger
lo logf.Logger
bufPool sync.Pool

KeyDir KeyDir
df *datafile.DataFile
stale map[int]*datafile.DataFile
flockF *os.File
df *datafile.DataFile
stale map[int]*datafile.DataFile
flockF *os.File
}

func initLogger(debug bool) logf.Logger {
@@ -37,10 +33,13 @@ func initLogger(debug bool) logf.Logger {
return logf.New(opts)
}

func Init()(*BitCaspy, error){
func Init() (*BitCaspy, error) {
var (
index = 0
flockF *os.File
stale = map[int]*datafile.DataFile{}

stale = map[int]*datafile.DataFile{}
)

// load existing data files
// file,err :=
}
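
The body of Init is left unfinished in this hunk. A minimal sketch of how it might continue, assuming a datafile.New(dir, id) constructor, a KeyDir map type, and a caller-supplied data directory (none of which appear in this commit), could look like:

// Sketch only: datafile.New, the KeyDir map type, and the lock-file name are
// assumptions, not part of this commit.
func initSketch(dir string) (*BitCaspy, error) {
	flockF, err := getFLock(filepath.Join(dir, "bitcaspy.lock"))
	if err != nil {
		return nil, err
	}

	files, err := getDatFiles(dir)
	if err != nil {
		return nil, err
	}
	ids, err := getIds(files)
	if err != nil {
		return nil, err
	}

	// Reopen all but the newest datafile as stale; the newest becomes active.
	index := 0
	stale := map[int]*datafile.DataFile{}
	if len(ids) > 0 {
		index = ids[len(ids)-1]
		for _, id := range ids[:len(ids)-1] {
			df, err := datafile.New(dir, id) // assumed constructor
			if err != nil {
				return nil, err
			}
			stale[id] = df
		}
	}

	df, err := datafile.New(dir, index) // assumed constructor
	if err != nil {
		return nil, err
	}

	return &BitCaspy{
		lo:      initLogger(false),
		bufPool: sync.Pool{New: func() any { return bytes.NewBuffer(nil) }},
		KeyDir:  KeyDir{}, // assumed map type, see the sketch after ops.go
		df:      df,
		stale:   stale,
		flockF:  flockF,
	}, nil
}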
Empty file added Bitcaspy.txt
Empty file.
4 changes: 2 additions & 2 deletions FLock.go
@@ -7,7 +7,7 @@ import (
"golang.org/x/sys/unix"
)

func getFLock(flockfile string)(*os.File, error) {
func getFLock(flockfile string) (*os.File, error) {
flockF, err := os.Create(flockfile)
if err != nil {
return nil, fmt.Errorf("cannot create lock file %q: %w", flockF, err)
@@ -33,4 +33,4 @@ func destroyFLock(flockF *os.File) error {
return fmt.Errorf("cannot remove file %q: %w", flockF.Name(), err)
}
return nil
}
}
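
The locking call itself sits outside this hunk; given the golang.org/x/sys/unix import, the acquisition step presumably looks something like the following (the LOCK_EX|LOCK_NB flags are an assumption):

// Assumed continuation of getFLock: take a non-blocking exclusive flock(2)
// on the lock file; the exact flags are not visible in this diff.
if err := unix.Flock(int(flockF.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
	return nil, fmt.Errorf("cannot acquire lock on file %q: %w", flockfile, err)
}
return flockF, nil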
2 changes: 1 addition & 1 deletion internal/datafile.go
@@ -8,7 +8,7 @@ import (
)

const (
ACTIVE_DATAFILE = "bitcaspy%d.db"
ACTIVE_DATAFILE = "bitcaspy_%d.db"
)

type DataFile struct {
(rest of file not shown)
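
The rest of the DataFile type is not shown in this view; from how ops.go uses it, the surface it exposes is assumed to be roughly:

// Inferred from the calls in ops.go below; the real signatures live in
// internal/datafile.go and are not visible in this diff.
type dataFileAPI interface {
	ID() int                          // datafile id stored in Meta.id
	Read(pos, sz int) ([]byte, error) // read sz bytes starting at offset pos
	Write(data []byte) (int, error)   // append data, returning its offset
	Sync() error                      // flush buffered writes to disk
}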
40 changes: 19 additions & 21 deletions ops.go
@@ -3,7 +3,7 @@ package bitcaspy
import (
"bytes"
"fmt"
"hash/crc32"
"hash/crc32"
"time"

datafile "rohit.com/internal"
@@ -27,7 +27,7 @@ func (b *BitCaspy) get(key string) (Record, error) {
}
}

data, err := reader.Read(meta.value_pos,meta.value_sz)
data, err := reader.Read(meta.value_pos, meta.value_sz)
if err != nil {
return Record{}, fmt.Errorf("Error reading the dat from database file %v", err)
}
@@ -39,37 +39,37 @@ func (b *BitCaspy) get(key string) (Record, error) {

var (
offset = meta.value_pos + meta.value_sz
val = data[offset:]
val = data[offset:]
)
record := Record{
Header: Header,
Key: key,
Value: val,
Key: key,
Value: val,
}
return record, nil
}

func (b *BitCaspy) put(df *datafile.DataFile,Key string, Value []byte, expiryTime *time.Time) error{
func (b *BitCaspy) put(df *datafile.DataFile, Key string, Value []byte, expiryTime *time.Time) error {
// Prepare the header
header := Header{
crc: crc32.ChecksumIEEE(Value),
crc: crc32.ChecksumIEEE(Value),
tstamp: uint32(time.Now().Unix()),
ksz: uint32(len(Key)),
vsz: uint32(len(Value)),
ksz: uint32(len(Key)),
vsz: uint32(len(Value)),
}
if expiryTime != nil {
header.expiry = uint32(expiryTime.Unix())
}else {
} else {
header.expiry = 0
}

// Get the buffer from the pool for writing data.
buf := b.bufPool.Get().(*bytes.Buffer)
defer b.bufPool.Put(buf)

defer buf.Reset()

// Encode the header
// Encode the header
header.Encode(buf)

// Set the keys and values
@@ -83,15 +83,14 @@ func (b *BitCaspy) put(df *datafile.DataFile,Key string, Value []byte, expiryTim

// Create the meta object for the keydir
meta := Meta{
id : df.ID(),
value_sz : len(Value),
id: df.ID(),
value_sz: len(Value),
value_pos: offset,
tstamp : int(header.tstamp),
tstamp: int(header.tstamp),
}

b.KeyDir[Key] = meta


// Ensure that the in-memory contents of the buffer are flushed to disk
if err := df.Sync(); err != nil {
return fmt.Errorf("Error syncing the buffer to the disk: %v", err)
@@ -100,10 +99,9 @@ func (b *BitCaspy) put(df *datafile.DataFile,Key string, Value []byte, expiryTim
}

func (b *BitCaspy) delete(df *datafile.DataFile, Key string) error {
if err := b.put(df,Key,nil,nil);err != nil {
return fmt.Errorf("Error deleting the key: %v", err)
if err := b.put(df, Key, nil, nil); err != nil {
return fmt.Errorf("Error deleting the key: %v", err)
}
delete(b.KeyDir,Key)
delete(b.KeyDir, Key)
return nil

}
}
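
The Header, Meta, and KeyDir types used above are defined elsewhere in the repository; reconstructed from the literals in put(), their shapes are presumably close to this (field order and the Encode method are not shown here):

// Assumed shapes, reconstructed from the struct literals in put(); not the
// actual definitions, which are outside this diff.
type Header struct {
	crc    uint32 // CRC32 checksum of the value
	tstamp uint32 // write timestamp (Unix seconds)
	expiry uint32 // expiry timestamp, 0 if the record never expires
	ksz    uint32 // key size in bytes
	vsz    uint32 // value size in bytes
}

type Meta struct {
	id        int // id of the datafile holding the record
	value_sz  int // size of the stored value
	value_pos int // offset of the record within the datafile
	tstamp    int // timestamp copied from the header
}

// KeyDir maps each key to the location of its latest value on disk.
type KeyDir map[string]Meta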
47 changes: 47 additions & 0 deletions utils.go
@@ -0,0 +1,47 @@
package bitcaspy

import (
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
)

// exists returns true if the given path exists on the filesystem.
func exists(path string) bool {
_, err := os.Stat(path)
return err == nil
}

// getDatFiles returns the list of .db files in the database directory.
func getDatFiles(outDir string) ([]string, error) {
if !exists(outDir) {
return nil, fmt.Errorf("Error finding the directory %s", outDir)
}

files, err := filepath.Glob(fmt.Sprintf("%s/*.db", outDir))
if err != nil {
return nil, fmt.Errorf("Error getting files from the directory %v", err)
}
return files, nil
}

// getIds returns the sorted list of numeric ids parsed from the datafile names.
func getIds(files []string) ([]int, error) {
ids := make([]int, 0)

for _, file := range files {
id, err := strconv.ParseInt((strings.TrimPrefix(strings.TrimSuffix(filepath.Base(file), ".db"), "bitcaspy_")), 10, 32)
if err != nil {
return nil, fmt.Errorf("Error parsing the files path: %v", err)
}
ids = append(ids, int(id))
}
sort.Ints(ids)
return ids, nil
}
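
These helpers pair with the ACTIVE_DATAFILE rename above: datafiles are now named bitcaspy_<id>.db, which is exactly the prefix getIds strips before parsing the id. A short usage sketch, with a hypothetical helper name:

// nextDataFileID is a usage sketch: it derives the next active datafile id
// from the files already on disk. The helper itself is not part of this commit.
func nextDataFileID(outDir string) (int, error) {
	files, err := getDatFiles(outDir)
	if err != nil {
		return 0, err
	}
	ids, err := getIds(files) // e.g. "bitcaspy_3.db" -> 3, sorted ascending
	if err != nil {
		return 0, err
	}
	if len(ids) == 0 {
		return 0, nil
	}
	return ids[len(ids)-1] + 1, nil
}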
