feat: pass along the contentType metadata field if supplied
This change causes tusd for Go to match the NodeJS version's behavior, discussed here: https://stackoverflow.com/questions/74148196/how-to-resolve-application-octet-stream-in-s3-using-tus-node-tusd-uppy-or-net
mackinleysmith committed Nov 19, 2024
1 parent 9d85248 commit e42ff58
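
For background, the tus Creation extension transports per-upload metadata in the Upload-Metadata request header as comma-separated "key base64(value)" pairs, so a client only has to include a contentType entry for this change to take effect on the S3 side. Below is a minimal sketch, in Go, of how such a header could be assembled; the helper name buildUploadMetadata and the example values are illustrative and are not part of this commit or of tusd's API.

package main

import (
	"encoding/base64"
	"fmt"
	"sort"
	"strings"
)

// buildUploadMetadata encodes tus metadata as comma-separated
// "key base64(value)" pairs, per the tus Creation extension.
func buildUploadMetadata(meta map[string]string) string {
	keys := make([]string, 0, len(meta))
	for k := range meta {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order for readability

	pairs := make([]string, 0, len(meta))
	for _, k := range keys {
		pairs = append(pairs, k+" "+base64.StdEncoding.EncodeToString([]byte(meta[k])))
	}
	return strings.Join(pairs, ",")
}

func main() {
	// With this commit, the contentType entry is forwarded to S3's
	// CreateMultipartUpload as the object's Content-Type.
	header := buildUploadMetadata(map[string]string{
		"filename":    "report.pdf",
		"contentType": "application/pdf",
	})
	fmt.Println("Upload-Metadata:", header)
}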
Showing 2 changed files with 53 additions and 2 deletions.
8 changes: 6 additions & 2 deletions pkg/s3store/s3store.go
@@ -326,11 +326,15 @@ func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (hand

 	// Create the actual multipart upload
 	t := time.Now()
-	res, err := store.Service.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
+	multipartUploadInput := &s3.CreateMultipartUploadInput{
 		Bucket:   aws.String(store.Bucket),
 		Key:      store.keyWithPrefix(objectId),
 		Metadata: metadata,
-	})
+	}
+	if contentType, found := info.MetaData["contentType"]; found {
+		multipartUploadInput.ContentType = aws.String(contentType)
+	}
+	res, err := store.Service.CreateMultipartUpload(ctx, multipartUploadInput)
 	store.observeRequestDuration(t, metricCreateMultipartUpload)
 	if err != nil {
 		return nil, fmt.Errorf("s3store: unable to create multipart upload:\n%s", err)
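
With the change above, the Content-Type recorded on the completed S3 object comes from the supplied metadata instead of defaulting to application/octet-stream (the symptom described in the linked Stack Overflow question). A minimal verification sketch using the AWS SDK for Go v2 is shown below; the bucket name my-tus-bucket and the key uploadId are placeholders, not values taken from this commit.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "my-tus-bucket" and "uploadId" are placeholders for the bucket and
	// object key used by the tusd S3 store.
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("my-tus-bucket"),
		Key:    aws.String("uploadId"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Before this change: "application/octet-stream" (S3's default).
	// After it, with a contentType metadata entry: e.g. "application/pdf".
	fmt.Println("Content-Type:", aws.ToString(out.ContentType))
}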
47 changes: 47 additions & 0 deletions pkg/s3store/s3store_test.go
@@ -164,6 +164,53 @@ func TestNewUploadWithMetadataObjectPrefix(t *testing.T) {
	assert.NotNil(upload)
}

func TestNewUploadWithContentType(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	assert := assert.New(t)

	s3obj := NewMockS3API(mockCtrl)
	store := New("bucket", s3obj)

	assert.Equal("bucket", store.Bucket)
	assert.Equal(s3obj, store.Service)

	gomock.InOrder(
		s3obj.EXPECT().CreateMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{
			Bucket:      aws.String("bucket"),
			Key:         aws.String("uploadId"),
			ContentType: aws.String("application/pdf"),
			Metadata: map[string]string{
				"foo":         "hello",
				"bar":         "menü\r\nhi",
				"contentType": "application/pdf",
			},
		}).Return(&s3.CreateMultipartUploadOutput{
			UploadId: aws.String("multipartId"),
		}, nil),
		s3obj.EXPECT().PutObject(context.Background(), &s3.PutObjectInput{
			Bucket:        aws.String("bucket"),
			Key:           aws.String("uploadId.info"),
			Body:          bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü\r\nhi","contentType":"application/pdf","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
			ContentLength: aws.Int64(273),
		}),
	)

	info := handler.FileInfo{
		ID:   "uploadId",
		Size: 500,
		MetaData: map[string]string{
			"foo":         "hello",
			"bar":         "menü\r\nhi",
			"contentType": "application/pdf",
		},
	}

	upload, err := store.NewUpload(context.Background(), info)
	assert.Nil(err)
	assert.NotNil(upload)
}

// This test ensures that a newly created upload without any chunks can be
// directly finished. There are no calls to ListPart or HeadObject because
// the upload is not fetched from S3 first.
