Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/dev'
Browse files Browse the repository at this point in the history
  • Loading branch information
yahavi committed May 9, 2024
2 parents 964c3d4 + 4b9c587 commit d0fbc99
Show file tree
Hide file tree
Showing 17 changed files with 182 additions and 95 deletions.
2 changes: 2 additions & 0 deletions README.md
Expand Up @@ -424,6 +424,8 @@ params.SplitCount = 10
// The minimum file size in MiB required to attempt a multi-part upload.
// MinSplitSize default value: 200
params.MinSplitSize = 100
// The size in MiB of a single chunk; chunks of this size are uploaded concurrently during a multi-part upload.
params.ChunkSize = 5
// The min file size in bytes for "checksum deploy".
// "Checksum deploy" is the action of calculating the file checksum locally, before
// the upload, and skipping the actual file transfer if the file already
Expand Down
6 changes: 3 additions & 3 deletions artifactory/services/download.go
Expand Up @@ -389,7 +389,7 @@ func (ds *DownloadService) downloadFile(downloadFileDetails *httpclient.Download
return err
}

log.Debug(logMsgPrefix, "Artifactory response:", resp.Status)
log.Debug(logMsgPrefix+"Artifactory response:", resp.Status)
return errorutils.CheckResponseStatus(resp, http.StatusOK)
}

Expand Down Expand Up @@ -468,7 +468,7 @@ func createLocalSymlink(localPath, localFileName, symlinkArtifact string, symlin
if errorutils.CheckError(err) != nil {
return err
}
log.Debug(logMsgPrefix, "Creating symlink file.")
log.Debug(logMsgPrefix + "Creating symlink file.")
return nil
}

Expand Down Expand Up @@ -537,7 +537,7 @@ func (ds *DownloadService) downloadFileIfNeeded(downloadPath, localPath, localFi
return e
}
if isEqual {
log.Debug(logMsgPrefix, "File already exists locally.")
log.Debug(logMsgPrefix + "File already exists locally.")
if ds.Progress != nil {
ds.Progress.IncrementGeneralProgress()
}
Expand Down
9 changes: 6 additions & 3 deletions artifactory/services/upload.go
Expand Up @@ -619,7 +619,8 @@ func (us *UploadService) doUpload(artifact UploadData, targetUrlWithProps, logMs
return
}
if shouldTryMultipart {
if err = us.MultipartUpload.UploadFileConcurrently(artifact.Artifact.LocalPath, artifact.Artifact.TargetPath, fileInfo.Size(), details.Checksum.Sha1, us.Progress, uploadParams.SplitCount); err != nil {
if err = us.MultipartUpload.UploadFileConcurrently(artifact.Artifact.LocalPath, artifact.Artifact.TargetPath,
fileInfo.Size(), details.Checksum.Sha1, us.Progress, uploadParams.SplitCount, uploadParams.ChunkSize); err != nil {
return
}
// Once the file is uploaded to the storage, we finalize the multipart upload by performing a checksum deployment to save the file in Artifactory.
Expand Down Expand Up @@ -664,7 +665,7 @@ func logUploadResponse(logMsgPrefix string, resp *http.Response, body []byte, ch
} else {
strChecksumDeployed = ""
}
log.Debug(logMsgPrefix, "Artifactory response:", resp.Status, strChecksumDeployed)
log.Debug(logMsgPrefix+"Artifactory response:", resp.Status, strChecksumDeployed)
}
}

Expand Down Expand Up @@ -709,14 +710,16 @@ type UploadParams struct {
MinChecksumDeploy int64
MinSplitSize int64
SplitCount int
ChunkSize int64
ChecksumsCalcEnabled bool
Archive string
// When using the 'archive' option for upload, we can control the target path inside the uploaded archive using placeholders. This operation determines the TargetPathInArchive value.
TargetPathInArchive string
}

// NewUploadParams creates a new UploadParams instance pre-filled with the default
// upload values: checksum calculation enabled, default checksum-deploy threshold,
// and the default multipart thresholds (min split size, split count, chunk size).
func NewUploadParams() UploadParams {
	return UploadParams{CommonParams: &utils.CommonParams{}, MinChecksumDeploy: DefaultMinChecksumDeploy,
		ChecksumsCalcEnabled: true, MinSplitSize: defaultUploadMinSplit, SplitCount: defaultUploadSplitCount, ChunkSize: utils.DefaultUploadChunkSize}
}

func DeepCopyUploadParams(params *UploadParams) UploadParams {
Expand Down
46 changes: 24 additions & 22 deletions artifactory/services/utils/multipartupload.go
Expand Up @@ -54,7 +54,7 @@ const (

// Sizes and limits constants
MaxMultipartUploadFileSize = SizeTiB * 5
uploadPartSize int64 = SizeMiB * 20
DefaultUploadChunkSize int64 = SizeMiB * 20

// Retries and polling constants
retriesInterval = time.Second * 5
Expand Down Expand Up @@ -122,13 +122,14 @@ type getConfigResponse struct {
Supported bool `json:"supported,omitempty"`
}

func (mu *MultipartUpload) UploadFileConcurrently(localPath, targetPath string, fileSize int64, sha1 string, progress ioutils.ProgressMgr, splitCount int) (err error) {
func (mu *MultipartUpload) UploadFileConcurrently(localPath, targetPath string, fileSize int64, sha1 string,
progress ioutils.ProgressMgr, splitCount int, chunkSize int64) (err error) {
repoAndPath := strings.SplitN(targetPath, "/", 2)
repoKey := repoAndPath[0]
repoPath := repoAndPath[1]
logMsgPrefix := fmt.Sprintf("[Multipart upload %s] ", repoPath)

token, err := mu.createMultipartUpload(repoKey, repoPath, calculatePartSize(fileSize, 0))
token, err := mu.createMultipartUpload(repoKey, repoPath, calculatePartSize(fileSize, 0, chunkSize))
if err != nil {
return
}
Expand All @@ -154,7 +155,7 @@ func (mu *MultipartUpload) UploadFileConcurrently(localPath, targetPath string,
}
}()

if err = mu.uploadPartsConcurrently(logMsgPrefix, fileSize, splitCount, localPath, progressReader, multipartUploadClient); err != nil {
if err = mu.uploadPartsConcurrently(logMsgPrefix, fileSize, chunkSize, splitCount, localPath, progressReader, multipartUploadClient); err != nil {
return
}

Expand All @@ -175,9 +176,9 @@ func (mu *MultipartUpload) UploadFileConcurrently(localPath, targetPath string,
return mu.completeAndPollForStatus(logMsgPrefix, uint(mu.client.GetHttpClient().GetRetries())+1, sha1, multipartUploadClient, progressReader)
}

func (mu *MultipartUpload) uploadPartsConcurrently(logMsgPrefix string, fileSize int64, splitCount int, localPath string, progressReader ioutils.Progress, multipartUploadClient *httputils.HttpClientDetails) (err error) {
numberOfParts := calculateNumberOfParts(fileSize)
log.Info(fmt.Sprintf("%sSplitting file to %d parts, using %d working threads for uploading...", logMsgPrefix, numberOfParts, splitCount))
func (mu *MultipartUpload) uploadPartsConcurrently(logMsgPrefix string, fileSize, chunkSize int64, splitCount int, localPath string, progressReader ioutils.Progress, multipartUploadClient *httputils.HttpClientDetails) (err error) {
numberOfParts := calculateNumberOfParts(fileSize, chunkSize)
log.Info(fmt.Sprintf("%sSplitting file to %d parts of %s each, using %d working threads for uploading...", logMsgPrefix, numberOfParts, ConvertIntToStorageSizeString(chunkSize), splitCount))
producerConsumer := parallel.NewRunner(splitCount, uint(numberOfParts), false)

wg := new(sync.WaitGroup)
Expand All @@ -186,7 +187,7 @@ func (mu *MultipartUpload) uploadPartsConcurrently(logMsgPrefix string, fileSize
attemptsAllowed.Add(uint64(numberOfParts) * uint64(mu.client.GetHttpClient().GetRetries()))
go func() {
for i := 0; i < int(numberOfParts); i++ {
if err = mu.produceUploadTask(producerConsumer, logMsgPrefix, localPath, fileSize, numberOfParts, int64(i), progressReader, multipartUploadClient, attemptsAllowed, wg); err != nil {
if err = mu.produceUploadTask(producerConsumer, logMsgPrefix, localPath, fileSize, numberOfParts, int64(i), chunkSize, progressReader, multipartUploadClient, attemptsAllowed, wg); err != nil {
return
}
}
Expand All @@ -202,9 +203,9 @@ func (mu *MultipartUpload) uploadPartsConcurrently(logMsgPrefix string, fileSize
return
}

func (mu *MultipartUpload) produceUploadTask(producerConsumer parallel.Runner, logMsgPrefix, localPath string, fileSize, numberOfParts, partId int64, progressReader ioutils.Progress, multipartUploadClient *httputils.HttpClientDetails, attemptsAllowed *atomic.Uint64, wg *sync.WaitGroup) (retErr error) {
func (mu *MultipartUpload) produceUploadTask(producerConsumer parallel.Runner, logMsgPrefix, localPath string, fileSize, numberOfParts, partId, chunkSize int64, progressReader ioutils.Progress, multipartUploadClient *httputils.HttpClientDetails, attemptsAllowed *atomic.Uint64, wg *sync.WaitGroup) (retErr error) {
_, retErr = producerConsumer.AddTaskWithError(func(int) error {
uploadErr := mu.uploadPart(logMsgPrefix, localPath, fileSize, partId, progressReader, multipartUploadClient)
uploadErr := mu.uploadPart(logMsgPrefix, localPath, fileSize, partId, chunkSize, progressReader, multipartUploadClient)
if uploadErr == nil {
log.Info(fmt.Sprintf("%sCompleted uploading part %d/%d", logMsgPrefix, partId+1, numberOfParts))
wg.Done()
Expand All @@ -220,25 +221,25 @@ func (mu *MultipartUpload) produceUploadTask(producerConsumer parallel.Runner, l

// Sleep before trying again
time.Sleep(retriesInterval)
if err := mu.produceUploadTask(producerConsumer, logMsgPrefix, localPath, fileSize, numberOfParts, partId, progressReader, multipartUploadClient, attemptsAllowed, wg); err != nil {
if err := mu.produceUploadTask(producerConsumer, logMsgPrefix, localPath, fileSize, numberOfParts, partId, chunkSize, progressReader, multipartUploadClient, attemptsAllowed, wg); err != nil {
retErr = err
}
})
return
}

func (mu *MultipartUpload) uploadPart(logMsgPrefix, localPath string, fileSize, partId int64, progressReader ioutils.Progress, multipartUploadClient *httputils.HttpClientDetails) (err error) {
func (mu *MultipartUpload) uploadPart(logMsgPrefix, localPath string, fileSize, partId, chunkSize int64, progressReader ioutils.Progress, multipartUploadClient *httputils.HttpClientDetails) (err error) {
file, err := os.Open(localPath)
if err != nil {
return errorutils.CheckError(err)
}
defer func() {
err = errors.Join(err, errorutils.CheckError(file.Close()))
}()
if _, err = file.Seek(partId*uploadPartSize, io.SeekStart); err != nil {
if _, err = file.Seek(partId*chunkSize, io.SeekStart); err != nil {
return errorutils.CheckError(err)
}
partSize := calculatePartSize(fileSize, partId)
partSize := calculatePartSize(fileSize, partId, chunkSize)

limitReader := io.LimitReader(file, partSize)
limitReader = bufio.NewReader(limitReader)
Expand Down Expand Up @@ -402,21 +403,22 @@ func (mu *MultipartUpload) abort(logMsgPrefix string, multipartUploadClient *htt
return errorutils.CheckResponseStatusWithBody(resp, body, http.StatusNoContent)
}

// Calculates the size in bytes of the given part, based on the file size, the part number
// and the requested chunk size. Every part is exactly requestedChunkSize bytes long,
// except for the last one, which holds whatever remains of the file.
// fileSize - the file size
// partNumber - the current (zero-based) part number
// requestedChunkSize - chunk size requested by the user, or default.
func calculatePartSize(fileSize, partNumber, requestedChunkSize int64) int64 {
	partOffset := partNumber * requestedChunkSize
	// The last part may be shorter than a full chunk - return only the remainder.
	if partOffset+requestedChunkSize > fileSize {
		return fileSize - partOffset
	}
	return requestedChunkSize
}

// Calculates the number of parts based on the file size and the requested chunk size.
// fileSize - the file size
// chunkSize - the size of a single part
func calculateNumberOfParts(fileSize, chunkSize int64) int64 {
	// Ceiling division - a non-zero remainder adds one extra, shorter, part.
	return (fileSize + chunkSize - 1) / chunkSize
}

func parseMultipartUploadStatus(status statusResponse) (shouldKeepPolling, shouldRerunComplete bool, err error) {
Expand Down
32 changes: 16 additions & 16 deletions artifactory/services/utils/multipartupload_test.go
Expand Up @@ -109,7 +109,7 @@ func TestUploadPartsConcurrentlyTooManyAttempts(t *testing.T) {
defer cleanUp()

// Write something to the file
buf := make([]byte, uploadPartSize*3)
buf := make([]byte, DefaultUploadChunkSize*3)
_, err := rand.Read(buf)
assert.NoError(t, err)
_, err = tempFile.Write(buf)
Expand Down Expand Up @@ -146,7 +146,7 @@ func TestUploadPartsConcurrentlyTooManyAttempts(t *testing.T) {

// Execute uploadPartsConcurrently
fileSize := int64(len(buf))
err = multipartUpload.uploadPartsConcurrently("", fileSize, splitCount, tempFile.Name(), nil, &httputils.HttpClientDetails{})
err = multipartUpload.uploadPartsConcurrently("", fileSize, DefaultUploadChunkSize, splitCount, tempFile.Name(), nil, &httputils.HttpClientDetails{})
assert.ErrorIs(t, err, errTooManyAttempts)
}

Expand Down Expand Up @@ -285,19 +285,19 @@ var calculatePartSizeProvider = []struct {
partNumber int64
expectedPartSize int64
}{
{uploadPartSize - 1, 0, uploadPartSize - 1},
{uploadPartSize, 0, uploadPartSize},
{uploadPartSize + 1, 0, uploadPartSize},
{DefaultUploadChunkSize - 1, 0, DefaultUploadChunkSize - 1},
{DefaultUploadChunkSize, 0, DefaultUploadChunkSize},
{DefaultUploadChunkSize + 1, 0, DefaultUploadChunkSize},

{uploadPartSize*2 - 1, 1, uploadPartSize - 1},
{uploadPartSize * 2, 1, uploadPartSize},
{uploadPartSize*2 + 1, 1, uploadPartSize},
{DefaultUploadChunkSize*2 - 1, 1, DefaultUploadChunkSize - 1},
{DefaultUploadChunkSize * 2, 1, DefaultUploadChunkSize},
{DefaultUploadChunkSize*2 + 1, 1, DefaultUploadChunkSize},
}

func TestCalculatePartSize(t *testing.T) {
for _, testCase := range calculatePartSizeProvider {
t.Run(fmt.Sprintf("fileSize: %d partNumber: %d", testCase.fileSize, testCase.partNumber), func(t *testing.T) {
assert.Equal(t, testCase.expectedPartSize, calculatePartSize(testCase.fileSize, testCase.partNumber))
assert.Equal(t, testCase.expectedPartSize, calculatePartSize(testCase.fileSize, testCase.partNumber, DefaultUploadChunkSize))
})
}
}
Expand All @@ -308,19 +308,19 @@ var calculateNumberOfPartsProvider = []struct {
}{
{0, 0},
{1, 1},
{uploadPartSize - 1, 1},
{uploadPartSize, 1},
{uploadPartSize + 1, 2},
{DefaultUploadChunkSize - 1, 1},
{DefaultUploadChunkSize, 1},
{DefaultUploadChunkSize + 1, 2},

{uploadPartSize*2 - 1, 2},
{uploadPartSize * 2, 2},
{uploadPartSize*2 + 1, 3},
{DefaultUploadChunkSize*2 - 1, 2},
{DefaultUploadChunkSize * 2, 2},
{DefaultUploadChunkSize*2 + 1, 3},
}

func TestCalculateNumberOfParts(t *testing.T) {
for _, testCase := range calculateNumberOfPartsProvider {
t.Run(fmt.Sprintf("fileSize: %d", testCase.fileSize), func(t *testing.T) {
assert.Equal(t, testCase.expectedNumberOfParts, calculateNumberOfParts(testCase.fileSize))
assert.Equal(t, testCase.expectedNumberOfParts, calculateNumberOfParts(testCase.fileSize, DefaultUploadChunkSize))
})
}
}
Expand Down
22 changes: 22 additions & 0 deletions artifactory/services/utils/storageutils.go
Expand Up @@ -3,6 +3,7 @@ package utils
import (
"encoding/json"
"errors"
"fmt"
)

const (
Expand Down Expand Up @@ -126,3 +127,24 @@ type FileStoreSummary struct {
UsedSpace string `json:"usedSpace,omitempty"`
FreeSpace string `json:"freeSpace,omitempty"`
}

// ConvertIntToStorageSizeString converts a size in bytes to a human-readable string,
// formatted with one decimal digit and suffixed with the largest binary unit
// (TB, GB or MB) the size reaches, falling back to KB for anything smaller.
// Fix over the previous version: boundaries use >=, so an exact unit multiple
// (e.g. exactly 1 TiB) is rendered in that unit ("1.0TB") instead of the one below ("1024.0GB").
// NOTE(review): the suffixes say TB/GB/MB/KB but the divisors are binary (TiB/GiB/MiB/KiB) - confirm this labeling is intentional.
func ConvertIntToStorageSizeString(num int64) string {
	// Largest-to-smallest lookup table replaces the four copy-pasted branches.
	units := []struct {
		size   int64
		suffix string
	}{
		{SizeTiB, "TB"},
		{SizeGiB, "GB"},
		{SizeMiB, "MB"},
	}
	for _, unit := range units {
		if num >= unit.size {
			return fmt.Sprintf("%.1f", float64(num)/float64(unit.size)) + unit.suffix
		}
	}
	// Smaller than 1 MiB - express in KiB.
	return fmt.Sprintf("%.1f", float64(num)/float64(SizeKib)) + "KB"
}
18 changes: 18 additions & 0 deletions artifactory/services/utils/storageutils_test.go
Expand Up @@ -34,3 +34,21 @@ func buildFakeStorageInfo() StorageInfo {
FileStoreSummary: FileStoreSummary{},
}
}

// TestConvertIntToStorageSizeString verifies the human-readable formatting of
// byte counts across the KB, MB, GB and TB ranges.
func TestConvertIntToStorageSizeString(t *testing.T) {
	cases := []struct {
		bytes    int
		expected string
	}{
		{12546, "12.3KB"},
		{148576, "145.1KB"},
		{2587985, "2.5MB"},
		{12896547, "12.3MB"},
		{12896547785, "12.0GB"},
		{5248965785422365, "4773.9TB"},
	}

	for _, c := range cases {
		assert.Equal(t, c.expected, ConvertIntToStorageSizeString(int64(c.bytes)))
	}
}
15 changes: 10 additions & 5 deletions auth/authutils.go
Expand Up @@ -102,12 +102,17 @@ func ExtractUsernameFromAccessToken(token string) (username string) {
}

// Extract username from subject.
usernameStartIndex := strings.LastIndex(tokenPayload.Subject, "/")
if usernameStartIndex < 0 {
err = errorutils.CheckErrorf("couldn't extract username from access-token's subject: %s", tokenPayload.Subject)
return
if strings.HasPrefix(tokenPayload.Subject, "jfrt@") || strings.Contains(tokenPayload.Subject, "/users/") {
usernameStartIndex := strings.LastIndex(tokenPayload.Subject, "/")
if usernameStartIndex < 0 {
err = errorutils.CheckErrorf("couldn't extract username from access-token's subject: %s", tokenPayload.Subject)
return
}
username = tokenPayload.Subject[usernameStartIndex+1:]
} else {
// OIDC token with groups scope
username = tokenPayload.Subject
}
username = tokenPayload.Subject[usernameStartIndex+1:]
if username == "" {
err = errorutils.CheckErrorf("empty username extracted from access-token's subject: %s", tokenPayload.Subject)
}
Expand Down
11 changes: 11 additions & 0 deletions auth/authutils_test.go
Expand Up @@ -3,6 +3,8 @@ package auth
import (
"testing"

"github.com/jfrog/jfrog-client-go/utils/log"
"github.com/jfrog/jfrog-client-go/utils/tests"
"github.com/stretchr/testify/assert"
)

Expand All @@ -15,6 +17,8 @@ var (
token3 = "eyJ2ZXIiOiIyIiwidHlwIjoiSldUIiwiYWxnIjoiUlMyNTYiLCJraWQiOiJIcnU2VHctZk1yOTV3dy12TDNjV3ZBVjJ3Qm9FSHpHdGlwUEFwOE1JdDljIn0"
// #nosec G101
token4 = "eyJ2ZXIiOiIyIiwidHlwIjoiSldUIiwiYWxnIjoiUlMyNTYiLCJraWQiOiJsS0NYXzFvaTBQbTZGdF9XRklkejZLZ1g4U0FULUdOY0lJWXRjTC1KM084In0.eyJzdWIiOiJqZmZlQDAwMFwvdXNlcnNcL3RlbXB1c2VyIiwic2NwIjoiYXBwbGllZC1wZXJtaXNzaW9uc1wvYWRtaW4gYXBpOioiLCJhdWQiOlsiamZydEAqIiwiamZtZEAqIiwiamZldnRAKiIsImpmYWNAKiJdLCJpc3MiOiJqZmZlQDAwMCIsImV4cCI6MTYxNjQ4OTU4NSwiaWF0IjoxNjE2NDg1OTg1LCJqdGkiOiI0OTBlYWEzOS1mMzYxLTQxYjAtOTA5Ni1kNjg5NmQ0ZWQ3YjEifQ.J5P8Pu5tqEjgnLFLEoCdh1LJHWiMmEHht95v0EFuixwO-osq7sfXua_UCGBkKbmqVSGKew9kl_LTcbq_uMe281_5q2yYxT74iqc2wQ1K0uovEUeIU6E65oi70JwUWUwcF3sNJ2gFatnvgSu-2Kv6m-DtSIW36WS3Mh8uMZQ19ob4fmueVmMFyQsp0EEG6xFYeOK6SB8OUd0gAd_XvXiSRuF0eLabhKmXM2pVBLYfd2KIMlkFckEOGGOzeglvA62xmP4Ik7UsF487NAo0LeS_Pd79owr0jtgTYkCTrLlFhUzUMDVmD_LsCMyf_S4CJxhwkCRhhy9SYSs1WPgknL3--w"
// #nosec G101
token5 = "eyJ2ZXIiOiIyIiwidHlwIjoiSldUIiwiYWxnIjoiUlMyNTYiLCJraWQiOiJDRlVIRER4UXZaM1VNWEZxS0ZWUlFiOEFROEd6VWxSZkZJMEt4TmIzdk1jIn0.eyJzdWIiOiJ5YWhhdi90ZXN0LXJlcG8iLCJzY3AiOiJhcHBsaWVkLXBlcm1pc3Npb25zL2dyb3VwczpcImFkbWluLWdyb3VwXCIsIiwiYXVkIjoiKkAqIiwiaXNzIjoiamZhY0AwMWdnbXFxcDc0MzZuOTB3d3I4Ym5nMXp5OSIsImV4cCI6MTcxNTE4MzA3MiwiaWF0IjoxNzE1MTgzMDEyLCJqdGkiOiJmN2IxMmIzMi0xMmNkLTQ1Y2ItYWZjYS1iNTYyMjc2YjY0YmQifQ.I6df8E0_1t7uYzSQkiQBNh9GIGyr541rIRQ8BDD401N4DV98dWsqACmdlYTOAaxn_el4Lz7_OaK0GnVNGf9hiZz9QKaXbe-HnL9jG-TobpOlyhkc6iBpnizuZ9T9YiveCG_NgDMWn5syiZ912t6PuZqNN2JmwswqfE9QDm6xCH8fu7h0Rs1qDNkahtgQvO99e5d7LnuOS9VfkXBxLDZ5AeUbd89zmujgDB4hMXB3J-dQ3QxGHRPS_KUo7sf7lRvn4PydYkhbhrhg6GP6ss6rMmEJM5v8azMTrkLwksoCWtK9YpD5S70f7AoE5U5j5BttZ0S5dPGagKWZJiA1egna-w"
)

func TestExtractUsernameFromAccessToken(t *testing.T) {
Expand All @@ -27,7 +31,13 @@ func TestExtractUsernameFromAccessToken(t *testing.T) {
{"", token2, true},
{"", token3, true},
{"tempuser", token4, false},
{"yahav/test-repo", token5, false},
}
// Discard output logging to prevent negative logs
previousLog := tests.RedirectLogOutputToNil()
defer func() {
log.SetLogger(previousLog)
}()

for _, testCase := range testCases {
username := ExtractUsernameFromAccessToken(testCase.inputToken)
Expand All @@ -46,6 +56,7 @@ func TestExtractSubjectFromAccessToken(t *testing.T) {
{"jfrt@001c3gffhg2e8w6149e3a2q0w97", token2, false},
{"", token3, true},
{"jffe@000/users/tempuser", token4, false},
{"yahav/test-repo", token5, false},
}

for _, testCase := range testCases {
Expand Down

0 comments on commit d0fbc99

Please sign in to comment.