Merge pull request #884 from application-research/to-uint64
fix: change will use uint64 to all obj, obj refs and content Ids
alvin-reyes authored Jan 19, 2023
2 parents a94f49c + c2e3fdf commit 3be973d
Showing 34 changed files with 154 additions and 154 deletions.
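The change is mechanical but broad: object IDs, object-reference IDs, and content IDs previously declared with Go's platform-dependent uint are widened to an explicit uint64 across models, function signatures, maps, and slices. A minimal, self-contained sketch of why the width matters, assuming a 32-bit build; the sample ID below is illustrative and not taken from any Estuary database:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// uint is only 32 bits on 32-bit builds, so IDs past math.MaxUint32 would wrap;
	// uint64 is always 64 bits regardless of platform.
	var bigID uint64 = math.MaxUint32 + 1 // first ID that no longer fits in 32 bits

	truncated := uint32(bigID) // what a 32-bit wide ID field would silently do to this value
	fmt.Printf("uint64 ID: %d, after 32-bit truncation: %d\n", bigID, truncated)
}
```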
2 changes: 1 addition & 1 deletion api/v1/export.go
@@ -25,7 +25,7 @@ func (s *apiV1) exportUserData(uid uint) (*DataExport, error) {
return nil, err
}

var conts []uint
var conts []uint64
for _, c := range contents {
conts = append(conts, c.ID)
}
12 changes: 6 additions & 6 deletions api/v1/handlers.go
@@ -92,7 +92,7 @@ func serveProfile(c echo.Context) error {
}

type statsResp struct {
ID uint `json:"id"`
ID uint64 `json:"id"`
Cid cid.Cid `json:"cid"`
Filename string `json:"name"`
Size int64 `json:"size"`
@@ -1073,7 +1073,7 @@ type getContentResponse struct {
Deals []*model.ContentDeal `json:"deals"`
}

func (s *apiV1) calcSelector(aggregatedIn uint, contentID uint) (string, error) {
func (s *apiV1) calcSelector(aggregatedIn uint64, contentID uint64) (string, error) {
// sort the known content IDs aggregated in a CAR, and use the index in the sorted list
// to build the CAR sub-selector

@@ -1934,7 +1934,7 @@ func (s *apiV1) handleDealStats(c echo.Context) error {
return err
}

sbc := make(map[uint]*contentDealStats)
sbc := make(map[uint64]*contentDealStats)

for _, d := range alldeals {
maddr, err := d.MinerAddr()
@@ -2061,7 +2061,7 @@ func (s *apiV1) handleRetrievalCheck(c echo.Context) error {
if err != nil {
return err
}
if err := s.retrieveContent(ctx, uint(contid)); err != nil {
if err := s.retrieveContent(ctx, uint64(contid)); err != nil {
return err
}

@@ -2408,7 +2408,7 @@ func (s *apiV1) handleOffloadContent(c echo.Context) error {
return err
}

removed, err := s.CM.OffloadContents(c.Request().Context(), []uint{uint(cont)})
removed, err := s.CM.OffloadContents(c.Request().Context(), []uint64{uint64(cont)})
if err != nil {
return err
}
@@ -2457,7 +2457,7 @@ func (s *apiV1) handleRefreshContent(c echo.Context) error {
return err
}

if err := s.CM.RefreshContent(c.Request().Context(), uint(cont)); err != nil {
if err := s.CM.RefreshContent(c.Request().Context(), uint64(cont)); err != nil {
return c.JSON(500, map[string]string{"error": err.Error()})
}
return c.JSON(http.StatusOK, map[string]string{})
4 changes: 2 additions & 2 deletions api/v1/retrieval.go
@@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel/trace"
)

func (s *apiV1) retrievalAsksForContent(ctx context.Context, contid uint) (map[address.Address]*retrievalmarket.QueryResponse, error) {
func (s *apiV1) retrievalAsksForContent(ctx context.Context, contid uint64) (map[address.Address]*retrievalmarket.QueryResponse, error) {
ctx, span := s.tracer.Start(ctx, "retrievalAsksForContent", trace.WithAttributes(
attribute.Int("content", int(contid)),
))
@@ -55,7 +55,7 @@ func (s *apiV1) retrievalAsksForContent(ctx context.Context, contid uint) (map[a
return out, nil
}

func (s *apiV1) retrieveContent(ctx context.Context, contid uint) error {
func (s *apiV1) retrieveContent(ctx context.Context, contid uint64) error {
ctx, span := s.tracer.Start(ctx, "retrieveContent", trace.WithAttributes(
attribute.Int("content", int(contid)),
))
22 changes: 11 additions & 11 deletions autoretrieve/autoretrieve.go
@@ -78,8 +78,8 @@ func (autoretrieve *Autoretrieve) AddrInfo() (*peer.AddrInfo, error) {
type PublishedBatch struct {
gorm.Model

FirstContentID uint
Count uint
FirstContentID uint64
Count uint64
AutoretrieveHandle string
}

@@ -113,17 +113,17 @@ type Provider struct {
db *gorm.DB
advertisementInterval time.Duration
advertiseOffline bool
batchSize uint
batchSize uint64
}

type Iterator struct {
mhs []multihash.Multihash
index uint
firstContentID uint
count uint
firstContentID uint64
count uint64
}

func NewIterator(db *gorm.DB, firstContentID uint, count uint) (*Iterator, error) {
func NewIterator(db *gorm.DB, firstContentID uint64, count uint64) (*Iterator, error) {

// Read CID strings for this content ID
var cidStrings []string
@@ -293,7 +293,7 @@ func (provider *Provider) Run(ctx context.Context) error {
}

// For each batch that should be advertised...
for firstContentID := uint(0); firstContentID <= lastContent.ID; firstContentID += provider.batchSize {
for firstContentID := uint64(0); firstContentID <= lastContent.ID; firstContentID += provider.batchSize {

// Find the amount of contents in this batch (likely less than
// the batch size if this is the last batch)
@@ -431,8 +431,8 @@ func (provider *Provider) Stop() error {

type contextParams struct {
provider peer.ID
firstContentID uint
count uint
firstContentID uint64
count uint64
}

// Content ID to context ID
@@ -458,7 +458,7 @@ func readContextID(contextID []byte) (contextParams, error) {

return contextParams{
provider: peerID,
firstContentID: uint(binary.BigEndian.Uint32(contextID[0:4])),
count: uint(binary.BigEndian.Uint32(contextID[4:8])),
firstContentID: uint64(uint(binary.BigEndian.Uint32(contextID[0:4]))),
count: uint64(uint(binary.BigEndian.Uint32(contextID[4:8]))),
}, nil
}
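Note that even after this change, readContextID still decodes two 4-byte big-endian fields and only then widens them, so the values carried inside a context ID remain capped at the 32-bit range. A minimal sketch of what a fully 64-bit layout could look like; the 16-byte layout, field order, and helper names are assumptions for illustration, not part of this commit (the real context ID also carries the provider's peer ID):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// packContextID is a hypothetical helper: two uint64 fields, 8 bytes each, big-endian.
func packContextID(firstContentID, count uint64) []byte {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint64(buf[0:8], firstContentID)
	binary.BigEndian.PutUint64(buf[8:16], count)
	return buf
}

// unpackContextID reverses packContextID.
func unpackContextID(contextID []byte) (firstContentID, count uint64, err error) {
	if len(contextID) != 16 {
		return 0, 0, errors.New("context ID must be 16 bytes")
	}
	return binary.BigEndian.Uint64(contextID[0:8]), binary.BigEndian.Uint64(contextID[8:16]), nil
}

func main() {
	id := packContextID(5_000_000_000, 25_000) // values above the uint32 range round-trip intact
	first, count, err := unpackContextID(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(first, count) // 5000000000 25000
}
```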
18 changes: 9 additions & 9 deletions cmd/estuary-shuttle/data.go
@@ -8,11 +8,11 @@ import (
)

type Pin struct {
ID uint `gorm:"primarykey" json:"id"`
ID uint64 `gorm:"primarykey" json:"id"`
CreatedAt time.Time `json:"-"`
UpdatedAt time.Time `json:"-"`

Content uint `gorm:"index"`
Content uint64 `gorm:"index"`

Cid util.DbCID `json:"cid"`
//Name string `json:"name"`
@@ -30,22 +30,22 @@ type Pin struct {
PinMeta string `json:"pinMeta"`
Failed bool `json:"failed"`

DagSplit bool `json:"dagSplit"`
SplitFrom uint `json:"splitFrom"`
DagSplit bool `json:"dagSplit"`
SplitFrom uint64 `json:"splitFrom"`
}

type Object struct {
ID uint `gorm:"primarykey"`
ID uint64 `gorm:"primarykey"`
Cid util.DbCID `gorm:"index"`
Size int
Size uint64
//Reads int
LastAccess time.Time
}

type ObjRef struct {
ID uint `gorm:"primarykey"`
Pin uint `gorm:"index"`
Object uint `gorm:"index"`
ID uint64 `gorm:"primarykey"`
Pin uint64 `gorm:"index"`
Object uint64 `gorm:"index"`
//Offloaded bool
}

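For reference, a small sketch of how the widened shuttle models might be exercised with GORM; the sqlite driver, database file name, and sample values are assumptions for illustration only, not how the shuttle is actually configured:

```go
package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Trimmed-down copies of the shuttle models above, keeping only the widened fields.
type Object struct {
	ID   uint64 `gorm:"primarykey"`
	Size uint64
}

type ObjRef struct {
	ID     uint64 `gorm:"primarykey"`
	Pin    uint64 `gorm:"index"`
	Object uint64 `gorm:"index"`
}

func main() {
	db, err := gorm.Open(sqlite.Open("shuttle-demo.db"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	// Creates the tables with 64-bit integer ID columns on drivers that distinguish widths.
	if err := db.AutoMigrate(&Object{}, &ObjRef{}); err != nil {
		panic(err)
	}

	obj := Object{Size: 1 << 20}
	db.Create(&obj) // GORM fills obj.ID from the auto-incremented primary key
	db.Create(&ObjRef{Pin: 1, Object: obj.ID})

	fmt.Println("created object", obj.ID)
}
```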
30 changes: 15 additions & 15 deletions cmd/estuary-shuttle/main.go
@@ -460,9 +460,9 @@ func main() {
Tracer: otel.Tracer(fmt.Sprintf("shuttle_%s", cfg.Hostname)),
trackingChannels: make(map[string]*util.ChanTrack),
inflightCids: make(map[cid.Cid]uint),
splitsInProgress: make(map[uint]bool),
aggrInProgress: make(map[uint]bool),
unpinInProgress: make(map[uint]bool),
splitsInProgress: make(map[uint64]bool),
aggrInProgress: make(map[uint64]bool),
unpinInProgress: make(map[uint64]bool),
outgoing: make(chan *rpcevent.Message, cfg.RpcEngine.Websocket.OutgoingQueueSize),
authCache: cache,
hostname: cfg.Hostname,
@@ -840,13 +840,13 @@ type Shuttle struct {
trackingChannels map[string]*util.ChanTrack

splitLk sync.Mutex
splitsInProgress map[uint]bool
splitsInProgress map[uint64]bool

aggrLk sync.Mutex
aggrInProgress map[uint]bool
aggrInProgress map[uint64]bool

unpinLk sync.Mutex
unpinInProgress map[uint]bool
unpinInProgress map[uint64]bool

addPinLk sync.Mutex

@@ -866,7 +866,7 @@ type Shuttle struct {
authCache *lru.TwoQueueCache

retrLk sync.Mutex
retrievalsInProgress map[uint]*retrievalProgress
retrievalsInProgress map[uint64]*retrievalProgress

inflightCids map[cid.Cid]uint
inflightCidsLk sync.Mutex
@@ -1546,7 +1546,7 @@ func (s *Shuttle) addrsForShuttle() []string {
return out
}

func (s *Shuttle) createContent(ctx context.Context, u *User, root cid.Cid, filename string, cic util.ContentInCollection) (uint, error) {
func (s *Shuttle) createContent(ctx context.Context, u *User, root cid.Cid, filename string, cic util.ContentInCollection) (uint64, error) {
log.Debugf("createContent> cid: %v, filename: %s, collection: %+v", root, filename, cic)

data, err := json.Marshal(util.ContentCreateBody{
@@ -1593,7 +1593,7 @@ func (s *Shuttle) createContent(ctx context.Context, u *User, root cid.Cid, file
return rbody.ID, nil
}

func (s *Shuttle) shuttleCreateContent(ctx context.Context, uid uint, root cid.Cid, filename, collection string, dagsplitroot uint) (uint, error) {
func (s *Shuttle) shuttleCreateContent(ctx context.Context, uid uint, root cid.Cid, filename, collection string, dagsplitroot uint64) (uint64, error) {
var cols []string
if collection != "" {
cols = []string{collection}
@@ -1677,7 +1677,7 @@ func (d *Shuttle) doPinning(ctx context.Context, op *operation.PinningOperation,
const noDataTimeout = time.Minute * 10

// TODO: mostly copy paste from estuary, dedup code
func (d *Shuttle) addDatabaseTrackingToContent(ctx context.Context, contid uint, dserv ipld.NodeGetter, bs blockstore.Blockstore, root cid.Cid, cb func(int64)) (int64, []*Object, error) {
func (d *Shuttle) addDatabaseTrackingToContent(ctx context.Context, contid uint64, dserv ipld.NodeGetter, bs blockstore.Blockstore, root cid.Cid, cb func(int64)) (int64, []*Object, error) {
ctx, span := d.Tracer.Start(ctx, "computeObjRefsUpdate")
defer span.End()

@@ -1748,7 +1748,7 @@ func (d *Shuttle) addDatabaseTrackingToContent(ctx context.Context, contid uint,
objlk.Lock()
objects = append(objects, &Object{
Cid: util.DbCID{CID: c},
Size: len(node.RawData()),
Size: uint64(len(node.RawData())),
})

totalSize += int64(len(node.RawData()))
@@ -1793,7 +1793,7 @@ func (d *Shuttle) addDatabaseTrackingToContent(ctx context.Context, contid uint,
return totalSize, objects, nil
}

func (d *Shuttle) onPinStatusUpdate(cont uint, location string, status types.PinningStatus) error {
func (d *Shuttle) onPinStatusUpdate(cont uint64, location string, status types.PinningStatus) error {
if status == types.PinningStatusFailed {
log.Debugf("updating pin: %d, status: %s, loc: %s", cont, status, location)

@@ -1923,7 +1923,7 @@ func (s *Shuttle) handleGetNetAddress(c echo.Context) error {
})
}

func (s *Shuttle) Unpin(ctx context.Context, contid uint) error {
func (s *Shuttle) Unpin(ctx context.Context, contid uint64) error {
// only progress if unpin is not already in progress for this content
if !s.markStartUnpin(contid) {
return nil
@@ -2007,7 +2007,7 @@ func (s *Shuttle) clearUnreferencedObjects(ctx context.Context, objs []*Object)
s.inflightCidsLk.Lock()
defer s.inflightCidsLk.Unlock()

var ids []uint
var ids []uint64
for _, o := range objs {
if !s.isInflight(o.Cid.CID) {
ids = append(ids, o.ID)
@@ -2279,7 +2279,7 @@ func (s *Shuttle) handleMinerTransferDiagnostics(c echo.Context) error {
}

type garbageCheckBody struct {
Contents []uint `json:"contents"`
Contents []uint64 `json:"contents"`
}

func (s *Shuttle) handleManualGarbageCheck(c echo.Context) error {
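A short sketch of how the widened garbageCheckBody binds in an echo handler; the route path and sample port are assumptions for illustration, not taken from the shuttle's actual routing:

```go
package main

import (
	"net/http"

	"github.com/labstack/echo/v4"
)

type garbageCheckBody struct {
	Contents []uint64 `json:"contents"`
}

func main() {
	e := echo.New()
	// Hypothetical route; the real shuttle registers handleManualGarbageCheck itself.
	e.POST("/debug/garbage-check", func(c echo.Context) error {
		var body garbageCheckBody
		if err := c.Bind(&body); err != nil {
			return err
		}
		// Content IDs above the 32-bit range now survive the JSON round trip.
		return c.JSON(http.StatusOK, body)
	})
	e.Logger.Fatal(e.Start(":3005"))
}
```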
