Fixing lint issue
raj-prince committed Sep 12, 2024
1 parent 49d2064 commit 6ea9b16
Showing 10 changed files with 75 additions and 69 deletions.
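The diffs below are mechanical golint-style cleanups: doc comments added to exported identifiers, Go initialism casing applied (Id → ID, Http → HTTP, Acl → ACL), and package-level names that need no export made unexported. A minimal sketch of the three conventions (illustrative identifiers, not this repository's code):

```go
package main

// OneKB means 1024 bytes. golint wants every exported identifier to carry
// a doc comment that starts with the identifier's name.
const OneKB = 1024

// maxConnsPerHost is unexported: names used only inside the package
// should not be exported at all.
var maxConnsPerHost = 100

// CreateHTTPClient illustrates the initialism rule: Http, Id, Acl
// become HTTP, ID, ACL in Go names.
func CreateHTTPClient() {}

func main() {
	CreateHTTPClient()
	_ = maxConnsPerHost
}
```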
9 changes: 6 additions & 3 deletions benchmark-script/2x_performance_test/main.go
@@ -16,18 +16,20 @@ var (
 	fSSD      = flag.Bool("ssd", false, "Given directory is ssd or not.")
 	fileCount = flag.Int("file-count", 10, "Number of files to read")
 
-	FileSize = 512 * OneKB
-	OneKB    = 1024
+	fileSize = 512 * oneKB
+	oneKB    = 1024
 	openLatency      []int64
 	readLatency      []int64
 	closeLatency     []int64
 	totalReadLatency []int64
 )
 
+// ReadFilesSequentially sequentially reads n (fileCount) number of
+// files from a directory one by one.
 func ReadFilesSequentially(fileCount int) (err error) {
 	startTime := time.Now()
 
-	b := make([]byte, FileSize*1024)
+	b := make([]byte, fileSize*1024)
 
 	for i := 0; i < fileCount; i++ {
 
@@ -75,6 +77,7 @@ func ReadFilesSequentially(fileCount int) (err error) {
 	return
 }
 
+// Report prints the statistical measurement of the given latencies.
 func Report(latency []int64, prefix string) {
 	sort.Slice(latency, func(i, j int) bool {
 		return latency[i] < latency[j]
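Report's body is truncated in this diff; it sorts the latency slice before summarizing. As a sketch of what a percentile report over sorted latencies typically computes (my own minimal version — the actual percentiles and formatting in the repo are assumptions):

```go
package main

import (
	"fmt"
	"sort"
)

// report prints a simple percentile summary of the given latencies,
// mirroring the shape of the Report helper above (details assumed).
func report(latency []int64, prefix string) {
	sort.Slice(latency, func(i, j int) bool { return latency[i] < latency[j] })
	for _, p := range []int{50, 90, 99} {
		// Index of the p-th percentile in the sorted slice.
		idx := (p * (len(latency) - 1)) / 100
		fmt.Printf("%s p%d: %d us\n", prefix, p, latency[idx])
	}
}

func main() {
	report([]int64{120, 80, 450, 95, 300}, "read")
}
```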
1 change: 1 addition & 0 deletions benchmark-script/analyse_scale_result/main.go
@@ -19,6 +19,7 @@ var (
 	forceDownload = flag.Bool("force-download", false, "Force download or not")
 )
 
+// DataRow represents one metrics row.
 type DataRow struct {
 	Timestamp   int64
 	ReadLatency float64
17 changes: 3 additions & 14 deletions benchmark-script/dynamic_delay_plotter/main.go
@@ -64,8 +64,6 @@ func actualSample(p *plot.Plot, dataRows []DataRow, d *util.Delay) {
 		xValues[i] = float64(i)
 		yValues1[i] = d.Value().Seconds()
 		yValues2[i] = actualDelay.Seconds()
-
-		//AddPoints(p, actualDelay.Seconds(), d.Value().Seconds())
 	}
 
 	// Add line series for the first curve (sine)
@@ -89,24 +87,15 @@ func actualSample(p *plot.Plot, dataRows []DataRow, d *util.Delay) {
fmt.Println("Over threshold: ", samplesOverThreshold)
}

func AddPoints(p *plot.Plot, x float64, y float64) {
// Create a slice of points (we'll have just one point)
pts := plotter.XYs{{X: x, Y: y}} // Adjust these coordinates as needed

// Add the points to the plot
scatter, err := plotter.NewScatter(pts)
if err != nil {
panic(err)
}
p.Add(scatter)
}

// DataRow represents one metrics.
type DataRow struct {
Timestamp int64
ReadLatency float64
Throughput float64
}

// GetDataRows parses all the CSV file present in a directory and return the
// list of DataRow.
func GetDataRows(folder string) ([]DataRow, error) {
// Store all data rows from all files
var allDataRows []DataRow
26 changes: 18 additions & 8 deletions benchmark-script/pprof_test/main.go
@@ -16,7 +16,11 @@ import (
 )
 
 const (
+
+	// KiB means 1024 bytes.
 	KiB = 1024
+
+	// MiB means 1024 KiB.
 	MiB = 1024 * KiB
 )
 
@@ -76,6 +80,7 @@ func profileOnce(path string) (err error) {
 //
 // }
 
+// ObjectAccessControlProjectTeam represents a team.
 type ObjectAccessControlProjectTeam struct {
 	// ProjectNumber: The project number.
 	ProjectNumber string `json:"projectNumber,omitempty"`
@@ -100,7 +105,7 @@ type ObjectAccessControlProjectTeam struct {
 	NullFields []string `json:"-"`
 }
 
-// ObjectAccessControl: An access-control entry.
+// ObjectAccessControl is an access-control entry.
 type ObjectAccessControl struct {
 	// Bucket: The name of the bucket.
 	Bucket string `json:"bucket,omitempty"`
@@ -115,7 +120,7 @@ type ObjectAccessControl struct {
 	// forms:
 	// - user-userId
 	// - user-email
-	// - group-groupId
+	// - group-groupID
 	// - group-email
 	// - domain-domain
 	// - project-team-projectId
@@ -128,8 +133,8 @@ type ObjectAccessControl struct {
 	// example.com, the entity would be domain-example.com.
 	Entity string `json:"entity,omitempty"`
 
-	// EntityId: The ID for the entity, if any.
-	EntityId string `json:"entityId,omitempty"`
+	// EntityID: The ID for the entity, if any.
+	EntityID string `json:"entityId,omitempty"`
 
 	// Etag: HTTP 1.1 Entity tag for the access-control entry.
 	Etag string `json:"etag,omitempty"`
@@ -138,8 +143,8 @@ type ObjectAccessControl struct {
 	// object.
 	Generation int64 `json:"generation,omitempty,string"`
 
-	// Id: The ID of the access-control entry.
-	Id string `json:"id,omitempty"`
+	// ID: The ID of the access-control entry.
+	ID string `json:"id,omitempty"`
 
 	// Kind: The kind of item this is. For object access control entries,
 	// this is always storage#objectAccessControl.
@@ -177,6 +182,8 @@ type ObjectAccessControl struct {
 	// This may be used to include null fields in Patch requests.
 	NullFields []string `json:"-"`
 }
+
+// Test is just a test struct for the memory experiment.
 type Test struct {
 	Name        string
 	ContentType string
@@ -219,9 +226,10 @@ type Test struct {
 	ContentDisposition string
 	CustomTime         string
 	EventBasedHold     bool
-	Acl                []ObjectAccessControl
+	ACL                []ObjectAccessControl
 }
 
+// CloneMap just does a deep copy of a map.
 func CloneMap(m map[string]string) map[string]string {
 
 	//return &map[string]string{strings.Clone("key1"): strings.Clone("testddddddd"),
@@ -234,13 +242,15 @@ func CloneMap(m map[string]string) map[string]string {

 	return newMap
 }
+
+// GetObject returns a customized Test object.
 func GetObject() *Test {
 	return &Test{
 		Name:        strings.Clone("aa"),
 		ContentType: "text/plane",
 		Metadata: map[string]string{strings.Clone("key1"): strings.Clone("testddddddd"),
 			strings.Clone("key2"): strings.Clone("testddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddhffffffffffffff")},
-		Acl: []ObjectAccessControl{{}, {}, {}, {}, {}},
+		ACL: []ObjectAccessControl{{}, {}, {}, {}, {}},
 	}
 }
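Worth noting: renaming EntityId/Id/Acl to EntityID/ID/ACL only changes the Go identifiers; the `json` struct tags keep entityId/id and so on, so the marshalled wire format is unchanged. A quick check:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// entry mimics one ObjectAccessControl field: the Go name follows the ID
// initialism rule, while the json tag keeps the original wire name.
type entry struct {
	EntityID string `json:"entityId,omitempty"`
}

func main() {
	out, _ := json.Marshal(entry{EntityID: "user-42"})
	fmt.Println(string(out)) // {"entityId":"user-42"} — wire format unchanged
}
```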
1 change: 1 addition & 0 deletions benchmark-script/read_operation/main.go
@@ -24,6 +24,7 @@ var (

 	eG errgroup.Group
 
+	// OneKB means 1024 bytes.
 	OneKB = 1024
 
 	fNumberOfRead = flag.Int("read-count", 1, "number of read iteration")
13 changes: 7 additions & 6 deletions benchmark-script/read_parallelly/main.go
@@ -10,6 +10,7 @@ import (

 const chunkSize = 1024 // Size of the chunk to read
 
+// ReadFile reads a file from a given offset.
 func ReadFile(startOffset int64, file *os.File) {
 	// Create a buffer to hold the chunk data
 	buffer := make([]byte, chunkSize)
@@ -18,13 +19,13 @@ func ReadFile(startOffset int64, file *os.File) {
 	if err != nil && err != io.EOF {
 		fmt.Println("Error reading chunk:", err)
 		return
-	} else {
-		if err == io.EOF {
-			fmt.Println("read completed with Error: ", err)
-		} else {
-			fmt.Println("content: ", string(buffer[:]))
-			fmt.Println("read completed, successfully with startOffset: ", startOffset)
-		}
 	}
+
+	if err == io.EOF {
+		fmt.Println("read completed with Error: ", err)
+	} else {
+		fmt.Println("content: ", string(buffer[:]))
+		fmt.Println("read completed, successfully with startOffset: ", startOffset)
+	}
 }

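For context on the EOF handling above: assuming ReadFile's underlying call is file.ReadAt (as the offset-based signature suggests), ReadAt returns io.EOF whenever it fills fewer than chunkSize bytes, so EOF with n > 0 still delivers data. A standalone sketch of that flow (file path is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

const chunkSize = 1024

// readChunk reads one chunk at startOffset. ReadAt reports io.EOF on a
// short read at end of file, so the EOF case is not a failure.
func readChunk(startOffset int64, file *os.File) {
	buffer := make([]byte, chunkSize)
	n, err := file.ReadAt(buffer, startOffset)
	if err != nil && err != io.EOF {
		fmt.Println("Error reading chunk:", err)
		return
	}
	fmt.Printf("read %d bytes at offset %d (EOF: %v)\n", n, startOffset, err == io.EOF)
	fmt.Println("content:", string(buffer[:n]))
}

func main() {
	f, err := os.Open("file.txt") // placeholder path
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()
	readChunk(0, f)
}
```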
5 changes: 1 addition & 4 deletions benchmark-script/ssd_test/main.go
@@ -17,6 +17,7 @@ var (

 	eG errgroup.Group
 
+	// OneKB means 1024 bytes.
 	OneKB = 1024
 )

@@ -64,10 +65,6 @@ func runReadFileOperations() (err error) {
 	return
 }
 
-func MicroSecondsToMilliSecond(microSecond int64) float64 {
-	return 0.001 * float64(microSecond)
-}
-
 func main() {
 	flag.Parse()
 
1 change: 1 addition & 0 deletions benchmark-script/write_operations/main.go
@@ -24,6 +24,7 @@ var (

 	eG errgroup.Group
 
+	// OneKB means 1024 bytes.
 	OneKB = 1024
 
 	fFileSize = flag.Int("file-size", 1, "in KB")
62 changes: 32 additions & 30 deletions main.go
@@ -31,30 +31,29 @@ import (
 )
 
 var (
-	GrpcConnPoolSize    = 1
-	MaxConnsPerHost     = 100
-	MaxIdleConnsPerHost = 100
+	grpcConnPoolSize    = 1
+	maxConnsPerHost     = 100
+	maxIdleConnsPerHost = 100
 
+	// MB means 1024 KB.
 	MB = 1024 * 1024
 
-	NumOfWorker = flag.Int("worker", 48, "Number of concurrent worker to read")
+	numOfWorker = flag.Int("worker", 48, "Number of concurrent worker to read")
 
-	NumOfReadCallPerWorker = flag.Int("read-call-per-worker", 1000000, "Number of read call per worker")
+	numOfReadCallPerWorker = flag.Int("read-call-per-worker", 1000000, "Number of read call per worker")
 
-	MaxRetryDuration = 30 * time.Second
+	maxRetryDuration = 30 * time.Second
 
-	RetryMultiplier = 2.0
+	retryMultiplier = 2.0
 
-	BucketName = flag.String("bucket", "princer-working-dirs", "GCS bucket name.")
+	bucketName = flag.String("bucket", "princer-working-dirs", "GCS bucket name.")
 
-	// ProjectName denotes gcp project name.
-	ProjectName = flag.String("project", "gcs-fuse-test", "GCP project name.")
-
-	clientProtocol = flag.String("client-protocol", "http", "Network protocol.")
-
-	// ObjectNamePrefix<worker_id>ObjectNameSuffix is the object name format.
-	// Here, worker id goes from <0 to NumberOfWorker>.
-	ObjectNamePrefix = "princer_100M_files/file_"
-	ObjectNameSuffix = ""
+	clientProtocol   = flag.String("client-protocol", "http", "Network protocol.")
+	objectNamePrefix = "princer_100M_files/file_"
+	objectNameSuffix = ""
 
 	tracerName    = "princer-storage-benchmark"
 	enableTracing = flag.Bool("enable-tracing", false, "Enable tracing with Cloud Trace export")
 
 	eG errgroup.Group
 )

-func CreateHttpClient(ctx context.Context, isHttp2 bool) (client *storage.Client, err error) {
+// CreateHTTPClient creates an HTTP storage client.
+func CreateHTTPClient(ctx context.Context, isHTTP2 bool) (client *storage.Client, err error) {
 	var transport *http.Transport
 	// Using http1 makes the client more performant.
-	if !isHttp2 {
+	if !isHTTP2 {
 		transport = &http.Transport{
-			MaxConnsPerHost:     MaxConnsPerHost,
-			MaxIdleConnsPerHost: MaxIdleConnsPerHost,
+			MaxConnsPerHost:     maxConnsPerHost,
+			MaxIdleConnsPerHost: maxIdleConnsPerHost,
 			// This disables HTTP/2 in transport.
 			TLSNextProto: make(
 				map[string]func(string, *tls.Conn) http.RoundTripper,
@@ -80,7 +80,7 @@ func CreateHttpClient(ctx context.Context, isHttp2 bool) (client *storage.Client, err error) {
 		// For http2, change in MaxConnsPerHost doesn't affect the performance.
 		transport = &http.Transport{
 			DisableKeepAlives: true,
-			MaxConnsPerHost:   MaxConnsPerHost,
+			MaxConnsPerHost:   maxConnsPerHost,
 			ForceAttemptHTTP2: true,
 		}
 	}
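The TLSNextProto line above is the standard way to disable HTTP/2 in net/http: per the package documentation, a non-nil (here empty) TLSNextProto map stops the transport from negotiating a protocol upgrade over TLS, so connections stay on HTTP/1.1 and the MaxConnsPerHost limit applies per host. In isolation (placeholder URL):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// An empty, non-nil TLSNextProto map disables automatic HTTP/2 support.
	transport := &http.Transport{
		MaxConnsPerHost:     100,
		MaxIdleConnsPerHost: 100,
		TLSNextProto:        make(map[string]func(string, *tls.Conn) http.RoundTripper),
	}
	client := &http.Client{Transport: transport}
	resp, err := client.Get("https://example.com") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("proto:", resp.Proto) // HTTP/1.1, since h2 is disabled
}
```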
@@ -108,28 +108,30 @@ func CreateHttpClient(ctx context.Context, isHttp2 bool) (client *storage.Client, err error) {
 	return storage.NewClient(ctx, option.WithHTTPClient(httpClient))
 }
 
+// CreateGrpcClient creates a gRPC storage client.
 func CreateGrpcClient(ctx context.Context) (client *storage.Client, err error) {
 	if err := os.Setenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS", "true"); err != nil {
 		log.Fatalf("error setting direct path env var: %v", err)
 	}
 
-	client, err = storage.NewGRPCClient(ctx, option.WithGRPCConnectionPool(GrpcConnPoolSize))
+	client, err = storage.NewGRPCClient(ctx, option.WithGRPCConnectionPool(grpcConnPoolSize))
 
 	if err := os.Unsetenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"); err != nil {
 		log.Fatalf("error while unsetting direct path env var: %v", err)
 	}
 	return
 }
 
-func ReadObject(ctx context.Context, workerId int, bucketHandle *storage.BucketHandle) (err error) {
+// ReadObject creates a reader object corresponding to workerID with the help of bucketHandle.
+func ReadObject(ctx context.Context, workerID int, bucketHandle *storage.BucketHandle) (err error) {
 
-	objectName := ObjectNamePrefix + strconv.Itoa(workerId) + ObjectNameSuffix
+	objectName := objectNamePrefix + strconv.Itoa(workerID) + objectNameSuffix
 
-	for i := 0; i < *NumOfReadCallPerWorker; i++ {
+	for i := 0; i < *numOfReadCallPerWorker; i++ {
 		var span trace.Span
 		traceCtx, span := otel.GetTracerProvider().Tracer(tracerName).Start(ctx, "ReadObject")
 		span.SetAttributes(
-			attribute.KeyValue{Key: "bucket", Value: attribute.StringValue(*BucketName)},
+			attribute.KeyValue{Key: "bucket", Value: attribute.StringValue(*bucketName)},
 		)
 		start := time.Now()
 		object := bucketHandle.Object(objectName)
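For readers unfamiliar with the tracing calls in ReadObject: each read starts a span from the globally registered tracer provider, attaches the bucket as an attribute, and ends the span when the read finishes. A minimal standalone shape (the tracer name matches the diff; the attribute value is illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	ctx := context.Background()
	// Start a span from the globally registered tracer provider.
	ctx, span := otel.GetTracerProvider().Tracer("princer-storage-benchmark").Start(ctx, "ReadObject")
	span.SetAttributes(attribute.String("bucket", "my-bucket")) // illustrative attribute
	defer span.End()
	_ = ctx // the derived context propagates the span to downstream calls
}
```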
@@ -180,7 +182,7 @@ func main() {
 	var client *storage.Client
 	var err error
 	if *clientProtocol == "http" {
-		client, err = CreateHttpClient(ctx, false)
+		client, err = CreateHTTPClient(ctx, false)
 	} else {
 		client, err = CreateGrpcClient(ctx)
 	}
@@ -192,13 +194,13 @@ func main() {

 	client.SetRetry(
 		storage.WithBackoff(gax.Backoff{
-			Max:        MaxRetryDuration,
-			Multiplier: RetryMultiplier,
+			Max:        maxRetryDuration,
+			Multiplier: retryMultiplier,
 		}),
 		storage.WithPolicy(storage.RetryAlways))
 
 	// assumes bucket already exist
-	bucketHandle := client.Bucket(*BucketName)
+	bucketHandle := client.Bucket(*bucketName)
 
 	// Enable stack-driver exporter.
 	registerLatencyView()
@@ -211,12 +213,12 @@ func main() {
 	defer closeSDExporter()
 
 	// Run the actual workload
-	for i := 0; i < *NumOfWorker; i++ {
+	for i := 0; i < *numOfWorker; i++ {
 		idx := i
 		eG.Go(func() error {
 			err = ReadObject(ctx, idx, bucketHandle)
 			if err != nil {
-				err = fmt.Errorf("while reading object %v: %w", ObjectNamePrefix+strconv.Itoa(idx), err)
+				err = fmt.Errorf("while reading object %v: %w", objectNamePrefix+strconv.Itoa(idx), err)
 				return err
 			}
 			return err
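The worker loop relies on the errgroup pattern: each worker goroutine returns its error into the group, and a single Wait call surfaces the first failure. A condensed, runnable version of that pattern with a stubbed read (stub names are mine, not the benchmark's):

```go
package main

import (
	"fmt"
	"strconv"

	"golang.org/x/sync/errgroup"
)

func main() {
	var eG errgroup.Group
	for i := 0; i < 4; i++ {
		idx := i // capture the loop variable for the closure (pre-Go 1.22 semantics)
		eG.Go(func() error {
			// Stub standing in for ReadObject(ctx, idx, bucketHandle).
			if idx == 3 {
				return fmt.Errorf("while reading object %v: stub failure", "file_"+strconv.Itoa(idx))
			}
			return nil
		})
	}
	// Wait blocks until all workers return and reports the first non-nil error.
	if err := eG.Wait(); err != nil {
		fmt.Println("read benchmark failed:", err)
	}
}
```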