From 3257f886027d720f22245e4a4a68acc86a4c6b59 Mon Sep 17 00:00:00 2001 From: Benjamin Lefaudeux Date: Mon, 20 Jan 2025 18:15:51 +0100 Subject: [PATCH 1/2] camelCase --- go.mod | 2 +- pkg/generator_db.go | 20 +++---- pkg/generator_filesystem.go | 9 ++- pkg/serdes.go | 106 ++++++++++++++++++------------------ pkg/transforms.go | 38 ++++++------- pkg/worker_filesystem.go | 28 +++++----- pkg/worker_http.go | 16 +++--- pkg/worker_pool.go | 16 +++--- 8 files changed, 117 insertions(+), 118 deletions(-) diff --git a/go.mod b/go.mod index a62ed42..98eb710 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,6 @@ require ( require ( golang.org/x/image v0.18.0 // indirect - golang.org/x/net v0.25.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/text v0.16.0 // indirect ) diff --git a/pkg/generator_db.go b/pkg/generator_db.go index 5fdaeba..0e5796d 100644 --- a/pkg/generator_db.go +++ b/pkg/generator_db.go @@ -94,7 +94,7 @@ type SourceDBConfig struct { HasLatents string `json:"has_latents"` LacksLatents string `json:"lacks_latents"` - ReturnLatents string `json:"return_latents"` + ReturnLatents string `json:"return_latents"` ReturnDuplicateState bool `json:"return_duplicate_state"` MinShortEdge int `json:"min_short_edge"` @@ -341,9 +341,9 @@ func getHTTPRequest(api_url string, api_key string, request dbRequest) *http.Req } else { api_url += "images/" } - request_url, _ := http.NewRequest("GET", api_url, nil) - request_url.Header.Add("Authorization", "Token "+api_key) - req := request_url.URL.Query() + requestURL, _ := http.NewRequest("GET", api_url, nil) + requestURL.Header.Add("Authorization", "Token "+api_key) + req := requestURL.URL.Query() maybeAddField := func(req *url.Values, field string, value string) { if value != "" { @@ -352,9 +352,9 @@ func getHTTPRequest(api_url string, api_key string, request dbRequest) *http.Req } // Limit the returned latents to the ones we asked for - return_latents := request.hasLatents + returnLatents := request.hasLatents if 
request.hasMasks != "" { - return_latents += "," + request.hasMasks + returnLatents += "," + request.hasMasks } maybeAddField(&req, "fields", request.fields) @@ -373,7 +373,7 @@ func getHTTPRequest(api_url string, api_key string, request dbRequest) *http.Req maybeAddField(&req, "has_latents", request.hasLatents) maybeAddField(&req, "lacks_latents", request.lacksLatents) - maybeAddField(&req, "return_latents", return_latents) + maybeAddField(&req, "return_latents", returnLatents) maybeAddField(&req, "short_edge__gte", request.minShortEdge) maybeAddField(&req, "short_edge__lte", request.maxShortEdge) @@ -385,8 +385,8 @@ func getHTTPRequest(api_url string, api_key string, request dbRequest) *http.Req maybeAddField(&req, "partitions_count", request.partitionsCount) maybeAddField(&req, "partition", request.partition) - request_url.URL.RawQuery = req.Encode() - fmt.Println("Request URL:", request_url.URL.String()) + requestURL.URL.RawQuery = req.Encode() + fmt.Println("Request URL:", requestURL.URL.String()) fmt.Println() - return request_url + return requestURL } diff --git a/pkg/generator_filesystem.go b/pkg/generator_filesystem.go index 7766f34..400ea4e 100644 --- a/pkg/generator_filesystem.go +++ b/pkg/generator_filesystem.go @@ -43,9 +43,9 @@ type datagoGeneratorFileSystem struct { } func newDatagoGeneratorFileSystem(config SourceFileSystemConfig) datagoGeneratorFileSystem { - supported_img_extensions := []string{".jpg", ".jpeg", ".png", ".JPEG", ".JPG", ".PNG"} + supportedImgExtensions := []string{".jpg", ".jpeg", ".png", ".JPEG", ".JPG", ".PNG"} var extensionsMap = make(set) - for _, ext := range supported_img_extensions { + for _, ext := range supportedImgExtensions { extensionsMap.Add(ext) } @@ -54,7 +54,7 @@ func newDatagoGeneratorFileSystem(config SourceFileSystemConfig) datagoGenerator } fmt.Println("File system root directory", config.RootPath) - fmt.Println("Supported image extensions", supported_img_extensions) + fmt.Println("Supported image extensions", 
supportedImgExtensions) fmt.Println("Rank and World Size", config.Rank, config.WorldSize) return datagoGeneratorFileSystem{config: config, extensions: extensionsMap} @@ -84,8 +84,7 @@ func (f datagoGeneratorFileSystem) generatePages(ctx context.Context, chanPages if !info.IsDir() && f.extensions.Contains(filepath.Ext(path)) { if f.config.WorldSize > 1 && hash(path)%f.config.WorldSize != f.config.Rank || f.config.WorldSize == 1 { - new_sample := fsSampleMetadata{FilePath: path, FileName: info.Name()} - samples = append(samples, SampleDataPointers(new_sample)) + samples = append(samples, SampleDataPointers(fsSampleMetadata{FilePath: path, FileName: info.Name()})) } } diff --git a/pkg/serdes.go b/pkg/serdes.go index 0bb71ab..84ea916 100644 --- a/pkg/serdes.go +++ b/pkg/serdes.go @@ -19,15 +19,15 @@ func readBodyBuffered(resp *http.Response) ([]byte, error) { bufferSize := 2048 * 1024 // 2MB // Create a fixed-size buffer for reading - local_buffer := make([]byte, bufferSize) + localBuffer := make([]byte, bufferSize) for { - n, err := resp.Body.Read(local_buffer) + n, err := resp.Body.Read(localBuffer) if err != nil && err != io.EOF { return nil, err } if n > 0 { - body.Write(local_buffer[:n]) + body.Write(localBuffer[:n]) } if err == io.EOF { break @@ -36,7 +36,7 @@ func readBodyBuffered(resp *http.Response) ([]byte, error) { return body.Bytes(), nil } -func imageFromBuffer(buffer []byte, transform *ARAwareTransform, aspect_ratio float64, pre_encode_image bool, is_mask bool) (*ImagePayload, float64, error) { +func imageFromBuffer(buffer []byte, transform *ARAwareTransform, aspectRatio float64, encodeImage bool, isMask bool) (*ImagePayload, float64, error) { // Decode the image payload using vips img, err := vips.NewImageFromBuffer(buffer) if err != nil { @@ -49,10 +49,10 @@ func imageFromBuffer(buffer []byte, transform *ARAwareTransform, aspect_ratio fl } // Optionally crop and resize the image on the fly. 
Save the aspect ratio in the process for future use - original_width, original_height := img.Width(), img.Height() + originalWidth, originalHeight := img.Width(), img.Height() if transform != nil { - aspect_ratio, err = transform.cropAndResizeToClosestAspectRatio(img, aspect_ratio) + aspectRatio, err = transform.cropAndResizeToClosestAspectRatio(img, aspectRatio) if err != nil { return nil, -1., err } @@ -71,7 +71,7 @@ func imageFromBuffer(buffer []byte, transform *ARAwareTransform, aspect_ratio fl } // If the image is not a mask but is 1 channel, we want to convert it to 3 channels - if (img.Bands() == 1) && !is_mask { + if (img.Bands() == 1) && !isMask { err = img.ToColorSpace(vips.InterpretationSRGB) if err != nil { fmt.Println("Error converting to sRGB:", err) @@ -80,55 +80,55 @@ func imageFromBuffer(buffer []byte, transform *ARAwareTransform, aspect_ratio fl } // If requested, re-encode the image to a jpg or png - var img_bytes []byte + var imgBytes []byte var channels int - var bit_depth int + var bitDepth int - if pre_encode_image { + if encodeImage { if err != nil { return nil, -1., err } if img.Bands() == 3 { // Re-encode the image to a jpg - img_bytes, _, err = img.ExportJpeg(&vips.JpegExportParams{Quality: 95}) + imgBytes, _, err = img.ExportJpeg(&vips.JpegExportParams{Quality: 95}) if err != nil { return nil, -1., err } } else { // Re-encode the image to a png - img_bytes, _, err = img.ExportPng(vips.NewPngExportParams()) + imgBytes, _, err = img.ExportPng(vips.NewPngExportParams()) if err != nil { return nil, -1., err } } channels = -1 // Signal that we have encoded the image } else { - img_bytes, err = img.ToBytes() + imgBytes, err = img.ToBytes() if err != nil { return nil, -1., err } channels = img.Bands() // Define bit depth de facto, not exposed in the vips interface - bit_depth = len(img_bytes) / (width * height * channels) * 8 // 8 bits per byte + bitDepth = len(imgBytes) / (width * height * channels) * 8 // 8 bits per byte } - if bit_depth == 0 
&& !pre_encode_image { + if bitDepth == 0 && !encodeImage { panic("Bit depth not set") } - img_payload := ImagePayload{ - Data: img_bytes, - OriginalHeight: original_height, - OriginalWidth: original_width, + imgPayload := ImagePayload{ + Data: imgBytes, + OriginalHeight: originalHeight, + OriginalWidth: originalWidth, Height: height, Width: width, Channels: channels, - BitDepth: bit_depth, + BitDepth: bitDepth, } - return &img_payload, aspect_ratio, nil + return &imgPayload, aspectRatio, nil } func fetchURL(client *http.Client, url string, retries int) (urlPayload, error) { @@ -160,14 +160,14 @@ func fetchURL(client *http.Client, url string, retries int) (urlPayload, error) return urlPayload{url: url, content: nil}, fmt.Errorf("%s", err_msg) } -func fetchImage(client *http.Client, url string, retries int, transform *ARAwareTransform, aspect_ratio float64, pre_encode_image bool, is_mask bool) (*ImagePayload, float64, error) { - err_report := fmt.Errorf("failed fetching image %s", url) +func fetchImage(client *http.Client, url string, retries int, transform *ARAwareTransform, aspectRatio float64, encodeImage bool, isMask bool) (*ImagePayload, float64, error) { + errReport := fmt.Errorf("failed fetching image %s", url) for i := 0; i < retries; i++ { // Get the raw image payload resp, err := client.Get(url) if err != nil { - err_report = err + errReport = err exponentialBackoffWait(i) // Renew the client in case the connection was closed @@ -178,79 +178,79 @@ func fetchImage(client *http.Client, url string, retries int, transform *ARAware body_bytes, err := readBodyBuffered(resp) if err != nil { - err_report = err + errReport = err exponentialBackoffWait(i) continue } // Decode into a flat buffer using vips - img_payload_ptr, aspect_ratio, err := imageFromBuffer(body_bytes, transform, aspect_ratio, pre_encode_image, is_mask) + imgPayload_ptr, aspectRatio, err := imageFromBuffer(body_bytes, transform, aspectRatio, encodeImage, isMask) if err != nil { break } - return 
img_payload_ptr, aspect_ratio, nil + return imgPayload_ptr, aspectRatio, nil } - return nil, -1., err_report + return nil, -1., errReport } -func fetchSample(config *SourceDBConfig, http_client *http.Client, sample_result dbSampleMetadata, transform *ARAwareTransform, pre_encode_image bool) *Sample { +func fetchSample(config *SourceDBConfig, httpClient *http.Client, sampleResult dbSampleMetadata, transform *ARAwareTransform, encodeImage bool) *Sample { // Per sample work: // - fetch the raw payloads // - deserialize / decode, depending on the types // return the result to the samples channel retries := 5 - img_payload := &ImagePayload{} + imgPayload := &ImagePayload{} - aspect_ratio := -1. // Not initialized to begin with + aspectRatio := -1. // Not initialized to begin with // Base image if config.RequireImages { - base_image, new_aspect_ratio, err := fetchImage(http_client, sample_result.ImageDirectURL, retries, transform, aspect_ratio, pre_encode_image, false) + baseImage, newAspectRatio, err := fetchImage(httpClient, sampleResult.ImageDirectURL, retries, transform, aspectRatio, encodeImage, false) if err != nil { - fmt.Println("Error fetching image:", sample_result.Id) + fmt.Println("Error fetching image:", sampleResult.Id) return nil } else { - img_payload = base_image - aspect_ratio = new_aspect_ratio + imgPayload = baseImage + aspectRatio = newAspectRatio } } // Latents latents := make(map[string]LatentPayload) masks := make(map[string]ImagePayload) - additional_images := make(map[string]ImagePayload) + extraImages := make(map[string]ImagePayload) - for _, latent := range sample_result.Latents { + for _, latent := range sampleResult.Latents { if strings.Contains(latent.LatentType, "image") && !strings.Contains(latent.LatentType, "latent_") { // Image types, registered as latents but they need to be jpg-decoded - new_image, _, err := fetchImage(http_client, latent.URL, retries, transform, aspect_ratio, pre_encode_image, false) + new_image, _, err := 
fetchImage(httpClient, latent.URL, retries, transform, aspectRatio, encodeImage, false) if err != nil { - fmt.Println("Error fetching masked image:", sample_result.Id, latent.LatentType) + fmt.Println("Error fetching masked image:", sampleResult.Id, latent.LatentType) return nil } - additional_images[latent.LatentType] = *new_image + extraImages[latent.LatentType] = *new_image } else if latent.IsMask { // Mask types, registered as latents but they need to be png-decoded - mask_ptr, _, err := fetchImage(http_client, latent.URL, retries, transform, aspect_ratio, pre_encode_image, true) + mask_ptr, _, err := fetchImage(httpClient, latent.URL, retries, transform, aspectRatio, encodeImage, true) if err != nil { - fmt.Println("Error fetching mask:", sample_result.Id, latent.LatentType) + fmt.Println("Error fetching mask:", sampleResult.Id, latent.LatentType) return nil } masks[latent.LatentType] = *mask_ptr } else { // Vanilla latents, pure binary payloads - latent_payload, err := fetchURL(http_client, latent.URL, retries) + latentPayload, err := fetchURL(httpClient, latent.URL, retries) if err != nil { fmt.Println("Error fetching latent:", err) return nil } latents[latent.LatentType] = LatentPayload{ - latent_payload.content, - len(latent_payload.content), + latentPayload.content, + len(latentPayload.content), } } } @@ -258,17 +258,17 @@ func fetchSample(config *SourceDBConfig, http_client *http.Client, sample_result // Optional embeddings var cocaEmbedding []float32 if config.RequireEmbeddings { - cocaEmbedding = sample_result.CocaEmbedding.Vector + cocaEmbedding = sampleResult.CocaEmbedding.Vector } - return &Sample{ID: sample_result.Id, - Source: sample_result.Source, - Attributes: sample_result.Attributes, - DuplicateState: sample_result.DuplicateState, - Image: *img_payload, + return &Sample{ID: sampleResult.Id, + Source: sampleResult.Source, + Attributes: sampleResult.Attributes, + DuplicateState: sampleResult.DuplicateState, + Image: *imgPayload, Latents: 
latents, Masks: masks, - AdditionalImages: additional_images, - Tags: sample_result.Tags, + AdditionalImages: extraImages, + Tags: sampleResult.Tags, CocaEmbedding: cocaEmbedding} } diff --git a/pkg/transforms.go b/pkg/transforms.go index d1673c3..3bb515f 100644 --- a/pkg/transforms.go +++ b/pkg/transforms.go @@ -29,37 +29,37 @@ type ARAwareTransform struct { } func buildImageSizeList(defaultImageSize int, downsamplingRatio int, minAspectRatio float64, maxAspectRatio float64) []ImageSize { - patch_size := defaultImageSize / downsamplingRatio - patch_size_sq := float64(patch_size * patch_size) - var image_list []ImageSize + patchSize := defaultImageSize / downsamplingRatio + patchSizeSq := float64(patchSize * patchSize) + var imgSizes []ImageSize - min_patch_w := int(math.Ceil(patch_size_sq * minAspectRatio)) - max_patch_w := int(math.Floor(patch_size_sq * maxAspectRatio)) + minPatchW := int(math.Ceil(patchSizeSq * minAspectRatio)) + maxPatchW := int(math.Floor(patchSizeSq * maxAspectRatio)) - for patch_w := min_patch_w; patch_w <= max_patch_w; patch_w++ { // go over all possible downsampled image widths - patch_h := int(math.Floor(patch_size_sq / float64(patch_w))) // get max height - img_w, img_h := patch_w*downsamplingRatio, patch_h*downsamplingRatio - image_list = append(image_list, ImageSize{img_w, img_h}) + for patchW := minPatchW; patchW <= maxPatchW; patchW++ { // go over all possible downsampled image widths + patchH := int(math.Floor(patchSizeSq / float64(patchW))) // get max height + imgW, imgH := patchW*downsamplingRatio, patchH*downsamplingRatio + imgSizes = append(imgSizes, ImageSize{imgW, imgH}) } - min_patch_h := int(math.Ceil(math.Sqrt(patch_size_sq * 1.0 / maxAspectRatio))) - max_patch_h := int(math.Floor(math.Sqrt(patch_size_sq * 1.0 / minAspectRatio))) - for patch_h := min_patch_h; patch_h <= max_patch_h; patch_h++ { // go over all possible downsampled image heights - patch_w := int(math.Floor(patch_size_sq / float64(patch_h))) // get max width - 
img_w, img_h := patch_w*downsamplingRatio, patch_h*downsamplingRatio - image_list = append(image_list, ImageSize{img_w, img_h}) + minPatchH := int(math.Ceil(math.Sqrt(patchSizeSq * 1.0 / maxAspectRatio))) + maxPatchH := int(math.Floor(math.Sqrt(patchSizeSq * 1.0 / minAspectRatio))) + for patchH := minPatchH; patchH <= maxPatchH; patchH++ { // go over all possible downsampled image heights + patchW := int(math.Floor(patchSizeSq / float64(patchH))) // get max width + imgW, imgH := patchW*downsamplingRatio, patchH*downsamplingRatio + imgSizes = append(imgSizes, ImageSize{imgW, imgH}) } - return image_list + return imgSizes } func newARAwareTransform(imageConfig ImageTransformConfig) *ARAwareTransform { // Build the image size list - image_list := buildImageSizeList(imageConfig.DefaultImageSize, imageConfig.DownsamplingRatio, imageConfig.MinAspectRatio, imageConfig.MaxAspectRatio) + imgSizes := buildImageSizeList(imageConfig.DefaultImageSize, imageConfig.DownsamplingRatio, imageConfig.MinAspectRatio, imageConfig.MaxAspectRatio) // Fill in the map table to match aspect ratios and image sizes aspectRatioToSize := make(map[float64]ImageSize) - for _, size := range image_list { + for _, size := range imgSizes { aspectRatioToSize[size.AspectRatio()] = size } @@ -69,7 +69,7 @@ func newARAwareTransform(imageConfig ImageTransformConfig) *ARAwareTransform { downsamplingRatio: imageConfig.DownsamplingRatio, minAspectRatio: imageConfig.MinAspectRatio, maxAspectRatio: imageConfig.MaxAspectRatio, - targetImageSizes: image_list, + targetImageSizes: imgSizes, aspectRatioToSize: aspectRatioToSize, } } diff --git a/pkg/worker_filesystem.go b/pkg/worker_filesystem.go index e6b0ebc..30db029 100644 --- a/pkg/worker_filesystem.go +++ b/pkg/worker_filesystem.go @@ -10,52 +10,52 @@ type BackendFileSystem struct { config *DatagoConfig } -func loadSample(filesystem_sample fsSampleMetadata, transform *ARAwareTransform, pre_encode_image bool) *Sample { +func loadSample(fsSample fsSampleMetadata, 
transform *ARAwareTransform, encodeImage bool) *Sample { // Using mmap to put the file directly into memory, removes buffering needs - r, err := mmap.Open(filesystem_sample.FilePath) + r, err := mmap.Open(fsSample.FilePath) if err != nil { panic(err) } - bytes_buffer := make([]byte, r.Len()) - _, err = r.ReadAt(bytes_buffer, 0) + bytesBuffer := make([]byte, r.Len()) + _, err = r.ReadAt(bytesBuffer, 0) if err != nil { panic(err) } // Decode the image, can error out here also, and return the sample - img_payload, _, err := imageFromBuffer(bytes_buffer, transform, -1., pre_encode_image, false) + imgPayload, _, err := imageFromBuffer(bytesBuffer, transform, -1., encodeImage, false) if err != nil { - fmt.Println("Error loading image:", filesystem_sample.FileName) + fmt.Println("Error loading image:", fsSample.FileName) return nil } - return &Sample{ID: filesystem_sample.FileName, - Image: *img_payload, + return &Sample{ID: fsSample.FileName, + Image: *imgPayload, } } func (b BackendFileSystem) collectSamples(chanSampleMetadata chan SampleDataPointers, chanSamples chan Sample, transform *ARAwareTransform, encodeImages bool) { - sampleWorker := func(worker_handle *worker) { - defer worker_handle.stop() + sampleWorker := func(workerHandle *worker) { + defer workerHandle.stop() for { - worker_handle.state = worker_idle + workerHandle.state = workerStateIdle item_to_fetch, open := <-chanSampleMetadata if !open { return } - worker_handle.state = worker_running + workerHandle.state = workerStateRunning // Cast the item to fetch to the correct type - filesystem_sample, ok := item_to_fetch.(fsSampleMetadata) + fsSample, ok := item_to_fetch.(fsSampleMetadata) if !ok { panic("Failed to cast the item to fetch to dbSampleMetadata. 
This worker is probably misconfigured") } - sample := loadSample(filesystem_sample, transform, encodeImages) + sample := loadSample(fsSample, transform, encodeImages) if sample != nil { chanSamples <- *sample } diff --git a/pkg/worker_http.go b/pkg/worker_http.go index 7b3cdce..373384f 100644 --- a/pkg/worker_http.go +++ b/pkg/worker_http.go @@ -11,27 +11,27 @@ type BackendHTTP struct { func (b BackendHTTP) collectSamples(chanSampleMetadata chan SampleDataPointers, chanSamples chan Sample, transform *ARAwareTransform, encodeImages bool) { - sampleWorker := func(worker_handle *worker) { - defer worker_handle.stop() + sampleWorker := func(workerHandle *worker) { + defer workerHandle.stop() // One HHTP client per goroutine, make sure we don't run into race conditions when renewing - http_client := http.Client{Timeout: 30 * time.Second} + httpClient := http.Client{Timeout: 30 * time.Second} for { - worker_handle.state = worker_idle - item_to_fetch, open := <-chanSampleMetadata + workerHandle.state = workerStateIdle + itemToFetch, open := <-chanSampleMetadata if !open { return } - worker_handle.state = worker_running + workerHandle.state = workerStateRunning // Cast the item to fetch to the correct type - http_sample, ok := item_to_fetch.(dbSampleMetadata) + httpSample, ok := itemToFetch.(dbSampleMetadata) if !ok { panic("Failed to cast the item to fetch to dbSampleMetadata. 
This worker is probably misconfigured") } - sample := fetchSample(b.config, &http_client, http_sample, transform, encodeImages) + sample := fetchSample(b.config, &httpClient, httpSample, transform, encodeImages) if sample != nil { chanSamples <- *sample } diff --git a/pkg/worker_pool.go b/pkg/worker_pool.go index 38d5166..573c7cc 100644 --- a/pkg/worker_pool.go +++ b/pkg/worker_pool.go @@ -5,12 +5,12 @@ import ( ) // Define an enum which will be used to track the state of the worker -type worker_state int +type workerState int const ( - worker_idle worker_state = iota - worker_running - worker_done + workerStateIdle workerState = iota + workerStateRunning + workerStateDone ) // Define a stateful worker struct which will be spawned by the worker pool @@ -20,7 +20,7 @@ type worker struct { } func (w *worker) stop() { - w.state = worker_done + w.state = workerStateDone w.done <- true } @@ -28,13 +28,13 @@ func (w *worker) stop() { func runWorkerPool(sampleWorker func(*worker)) { // Shall we just use pond here ? 
// https://github.com/alitto/pond - worker_pool_size := runtime.NumCPU() + poolSize := runtime.NumCPU() // Start the workers and work on the metadata channel var workers []*worker - for i := 0; i < worker_pool_size; i++ { - newWorker := worker{state: worker_idle, done: make(chan bool)} + for i := 0; i < poolSize; i++ { + newWorker := worker{state: workerStateIdle, done: make(chan bool)} workers = append(workers, &newWorker) go sampleWorker(&newWorker) } From b1b3330d3194f4d826df8a638470772ea9e47c4f Mon Sep 17 00:00:00 2001 From: Benjamin Lefaudeux Date: Mon, 20 Jan 2025 18:29:30 +0100 Subject: [PATCH 2/2] set VIPS_DISC_THRESHOLD to 5GB to make it really unlikely that we fall on to disk --- go.mod | 6 ++++-- go.sum | 6 ++++-- pkg/client.go | 4 ++++ pkg/worker_pool.go | 2 +- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 98eb710..5543689 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module datago -go 1.23.0 +go 1.22.0 + +toolchain go1.22.2 require ( github.com/davidbyttow/govips/v2 v2.16.0 @@ -11,5 +13,5 @@ require ( require ( golang.org/x/image v0.18.0 // indirect golang.org/x/net v0.33.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/text v0.21.0 // indirect ) diff --git a/go.sum b/go.sum index 16357f2..c9b19d4 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,9 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -73,8 +74,9 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/pkg/client.go b/pkg/client.go index 2cabeee..c7c353c 100644 --- a/pkg/client.go +++ b/pkg/client.go @@ -162,6 +162,10 @@ type DatagoClient struct { // GetClient is a constructor for the DatagoClient, given a JSON configuration string func GetClient(config DatagoConfig) *DatagoClient { // Initialize the vips library + err := os.Setenv("VIPS_DISC_THRESHOLD", "5g") + if err != nil { + log.Panicf("Error setting VIPS_DISC_THRESHOLD: %v", err) + } vips.LoggingSettings(nil, vips.LogLevelWarning) vips.Startup(nil) diff --git a/pkg/worker_pool.go b/pkg/worker_pool.go index 573c7cc..f0e0469 100644 --- a/pkg/worker_pool.go +++ b/pkg/worker_pool.go @@ -15,7 +15,7 @@ const ( // Define a stateful worker struct which will be spawned by the worker pool type 
worker struct { - state worker_state // Allows us to track the state of the worker, useful for debugging or dynamic provisioning + state workerState // Allows us to track the state of the worker, useful for debugging or dynamic provisioning done chan bool }