Skip to content

Commit

Permalink
Release 1.1.6
Browse files Browse the repository at this point in the history
  • Loading branch information
sgusakovsky committed Mar 4, 2023
1 parent a828554 commit c40a26e
Show file tree
Hide file tree
Showing 8 changed files with 229 additions and 45 deletions.
2 changes: 1 addition & 1 deletion OpenAIService.podspec
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
Pod::Spec.new do |spec|
spec.name = 'OpenAIService'
spec.version = '1.1.5'
spec.version = '1.1.6'
spec.homepage = 'https://github.com/sgusakovsky/OpenAIService'
spec.license = {
:type => 'MIT',
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ You can use Swift Package Manager to integrate the library by adding the followi

You can use CocoaPods to integrate the library by adding the following dependency.

`pod 'OpenAIService''`
`pod 'OpenAIService'`

## Example Usage

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,21 @@ public struct OpenAIGenerationImageBody: Encodable {
public let prompt: String

/// The number of images to generate. Must be between 1 and 10.
public let imageCount: Int?
public let imageCount: Int

/// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024
public let size: OpenAIGenerationImageSize?
public let size: OpenAIGenerationImageSize

/// The format in which the generated images are returned. Must be one of url or b64_json.
public let responseFormat: OpenAIGenerationImageResponseFormat?
public let responseFormat: OpenAIGenerationImageResponseFormat

public let user: String?

public init(
prompt: String,
imageCount: Int? = 1,
size: OpenAIGenerationImageSize? = .large,
responseFormat: OpenAIGenerationImageResponseFormat? = .url,
imageCount: Int = 1,
size: OpenAIGenerationImageSize = .large,
responseFormat: OpenAIGenerationImageResponseFormat = .url,
user: String? = nil
) {
self.prompt = prompt
Expand Down
82 changes: 82 additions & 0 deletions Sources/OpenAIService/Networking/MultipartFormDataRequest.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
//
// MultipartFormDataRequest.swift
// OpenAIDemo
//
// Created by Gusakovsky, Sergey on 5.03.23.
//

import Foundation

struct MultipartFormDataRequest {
    /// Unique boundary string separating the multipart form fields.
    private let boundary: String = UUID().uuidString
    /// Accumulated form-field payload. `NSMutableData` is a reference type,
    /// which is why the non-mutating `add*Field` methods can append to it.
    private let body = NSMutableData()
    let endpoint: OpenAIEndpoint

    init(endpoint: OpenAIEndpoint) {
        self.endpoint = endpoint
    }

    /// Builds a `URLRequest` carrying the accumulated multipart payload.
    ///
    /// Fix: the closing boundary is appended to a local copy of `body`
    /// instead of `body` itself, so calling this method more than once no
    /// longer corrupts the payload with duplicate terminators.
    /// - Returns: The configured request, or `nil` when the endpoint URL
    ///   cannot be assembled.
    func asURLRequest() -> URLRequest? {
        guard let baseUrl = URL(string: endpoint.baseURL()) else {
            return nil
        }

        guard var urlComponents = URLComponents(url: baseUrl, resolvingAgainstBaseURL: true) else {
            return nil
        }

        urlComponents.path = endpoint.path

        guard let url = urlComponents.url else {
            return nil
        }

        var request = URLRequest(url: url)

        request.httpMethod = endpoint.method.rawValue
        request.setValue("multipart/form-data; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")

        // Terminate the payload on a copy so `body` stays reusable.
        let payload = NSMutableData(data: self.body as Data)
        payload.append("--\(boundary)--")
        request.httpBody = payload as Data

        return request
    }

    /// Appends a plain-text form field.
    func addTextField(named name: String, value: String) {
        self.body.append(textFormField(named: name, value: value))
    }

    /// Appends a binary form field (file upload) described by `formData`.
    func addDataField(named name: String, formData: FormData) {
        self.body.append(dataFormField(named: name, formData: formData))
    }

    /// Renders one text field, including its leading boundary and CRLF framing.
    private func textFormField(named name: String, value: String) -> String {
        var fieldString = "--\(boundary)\r\n"
        fieldString += "Content-Disposition: form-data; name=\"\(name)\"\r\n"
        fieldString += "\r\n"
        fieldString += "\(value)\r\n"

        return fieldString
    }

    /// Renders one binary field with filename and MIME type headers.
    private func dataFormField(named name: String, formData: FormData) -> Data {
        let fieldData = NSMutableData()

        fieldData.append("--\(boundary)\r\n")
        fieldData.append("Content-Disposition: form-data; name=\"\(name)\"; filename=\"\(formData.fileName)\"\r\n")
        fieldData.append("Content-Type: \(formData.mimeType)\r\n")
        fieldData.append("\r\n")
        fieldData.append(formData.data)
        fieldData.append("\r\n")

        return fieldData as Data
    }
}

extension NSMutableData {
    /// Appends the UTF-8 encoding of `string`.
    /// Strings that cannot be encoded are silently ignored, matching the
    /// best-effort behavior expected when assembling multipart bodies.
    func append(_ string: String) {
        guard let encoded = string.data(using: .utf8) else { return }
        self.append(encoded)
    }
}
30 changes: 11 additions & 19 deletions Sources/OpenAIService/Networking/OpenAIApiClient.swift
Original file line number Diff line number Diff line change
Expand Up @@ -80,35 +80,27 @@ class OpenAIApiClient {
return request
}

func prepareMultipartFormDataRequest<BodyType: Encodable>(
func prepareMultipartFormDataRequest(
_ endpoint: OpenAIEndpoint,
body: BodyType,
body: [String: Any],
config: OpenAIConfiguration
) -> URLRequest? {
guard let baseUrl = URL(string: endpoint.baseURL()) else {
return nil
}

guard var urlComponents = URLComponents(url: baseUrl, resolvingAgainstBaseURL: true) else {
return nil
}
let multipartRequest = MultipartFormDataRequest(endpoint: endpoint)

urlComponents.path = endpoint.path
for (key, value) in body {
if let dataValue = value as? FormData {
multipartRequest.addDataField(named: key, formData: dataValue)
} else {
multipartRequest.addTextField(named: key, value: "\(value)")
}
}

guard let url = urlComponents.url else {
guard var request = multipartRequest.asURLRequest() else {
return nil
}

var request = URLRequest(url: url)
request.httpMethod = endpoint.method.rawValue
request.setValue("Bearer \(config.apiKey)", forHTTPHeaderField: "Authorization")
request.setValue("application/json", forHTTPHeaderField: "Content-Type")

let encoder = JSONEncoder()
if let encoded = try? encoder.encode(body) {
request.httpBody = encoded
}

return request
}

Expand Down
2 changes: 1 addition & 1 deletion Sources/OpenAIService/Networking/OpenAIEndpoint.swift
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ enum OpenAIEndpoint {
case .imagesGenerations:
return "/v1/images/generations"
case .imageEdits:
return "/v1/images/generations"
return "/v1/images/edits"
}
}

Expand Down
77 changes: 77 additions & 0 deletions Sources/OpenAIService/Networking/OpenAIImageEditsBody.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
//
// OpenAIImageEditsBody.swift
// OpenAIDemo
//
// Created by Gusakovsky, Sergey on 5.03.23.
//

import Foundation
#if os(iOS)
import UIKit
#endif

/// Request body for the OpenAI image-edits endpoint.
public struct OpenAIImageEditsBody {
    /// The PNG image to edit, wrapped as a multipart form field.
    public let image: FormData
    /// The PNG mask indicating editable regions, wrapped as a form field.
    /// NOTE(review): the mask is uploaded with filename "image.png" as well —
    /// presumably fine since the server keys off the field name; verify.
    public let mask: FormData
    /// Text description of the desired edit.
    public let prompt: String
    /// Number of images to generate.
    public let numberOfImages: Int
    /// Requested output image size.
    public let size: OpenAIGenerationImageSize
    /// Whether the API returns a URL or base64 payload.
    public let responseFormat: OpenAIGenerationImageResponseFormat
    /// Optional end-user identifier for abuse monitoring.
    public let user: String?

    public init(
        image: Data,
        mask: Data,
        prompt: String,
        numberOfImages: Int = 1,
        size: OpenAIGenerationImageSize = .large,
        responseFormat: OpenAIGenerationImageResponseFormat = .url,
        user: String? = nil
    ) {
        self.image = FormData(data: image, mimeType: "image/png", fileName: "image.png")
        self.mask = FormData(data: mask, mimeType: "image/png", fileName: "image.png")
        self.prompt = prompt
        self.numberOfImages = numberOfImages
        self.size = size
        self.responseFormat = responseFormat
        self.user = user
    }

    #if os(iOS)
    /// Convenience initializer taking `UIImage`s; fails when either image
    /// cannot be encoded as PNG. Delegates to the `Data`-based initializer.
    public init?(
        image: UIImage,
        mask: UIImage,
        prompt: String,
        numberOfImages: Int = 1,
        size: OpenAIGenerationImageSize = .large,
        responseFormat: OpenAIGenerationImageResponseFormat = .url,
        user: String? = nil
    ) {
        guard let imageData = image.pngData(), let maskData = mask.pngData() else {
            return nil
        }
        self.init(
            image: imageData,
            mask: maskData,
            prompt: prompt,
            numberOfImages: numberOfImages,
            size: size,
            responseFormat: responseFormat,
            user: user
        )
    }
    #endif

    /// Dictionary consumed by the multipart request builder. Binary fields
    /// stay as `FormData`; everything else is sent as text.
    public var body: [String: Any] {
        var result: [String: Any] = [
            "image": self.image,
            "mask": self.mask,
            "prompt": self.prompt,
            "n": self.numberOfImages,
            "size": self.size.rawValue,
            "response_format": self.responseFormat.rawValue
        ]
        // "user" is only included when the caller supplied one.
        if let user = self.user {
            result["user"] = user
        }

        return result
    }
}
67 changes: 50 additions & 17 deletions Sources/OpenAIService/OpenAIService.swift
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,7 @@ public final class OpenAIService {

/// Send a Edit request to the OpenAI API
/// - Parameters:
/// - input: The input text to use as a starting point for the edit.
/// - model: The AI Model to Use. Set to `OpenAIEditsModelType.feature(.davinci)` by default which is the most capable model
/// - instruction: The instruction that tells the model how to edit the prompt.
/// - body: Body of the edits request
/// - completionHandler: Returns an OpenAIEditsResponse Data Model
public func sendEdits(
with body: OpenAIEditsBody,
Expand All @@ -96,9 +94,7 @@ public final class OpenAIService {

/// Send a Image generation request to the OpenAI API
/// - Parameters:
/// - prompt: A text description of the desired image(s). The maximum length is 1000 characters.
/// - imageSize: Size of expected image to Use. Set to `OpenAIGenerationImageSize.large` by default.
/// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// - body: Body of the image generation request
/// - completionHandler: Returns an OpenAIGenerationImageResponse Data Model
public func sendImageGeneration(
with body: OpenAIGenerationImageBody,
Expand All @@ -119,14 +115,33 @@ public final class OpenAIService {
)
}

/// Send an Image edits request to the OpenAI API.
/// - Parameters:
///   - body: Body of the image edits request
///   - networkQueue: Queue the network call is dispatched on
///   - responseQueue: Queue `completionHandler` is invoked on
///   - completionHandler: Returns an OpenAIGenerationImageResponse Data Model
public func sendImageEdits(
with body: OpenAIImageEditsBody,
networkQueue: DispatchQueue = .global(qos: .background),
responseQueue: DispatchQueue = .main,
completionHandler: @escaping (Result<OpenAIGenerationImageResponse, OpenAIAPIError>) -> Void
) {
let endpoint = OpenAIEndpoint.imageEdits
// The multipart request can fail to build (bad endpoint URL); report that
// as a generic request error rather than crashing.
guard let request = apiClient.prepareMultipartFormDataRequest(endpoint, body: body.body, config: config) else {
completionHandler(.failure(.genericError(error: RequestError())))
return
}

apiClient.makeRequest(
request: request,
networkQueue: networkQueue,
responseQueue: responseQueue,
completionHandler: completionHandler
)
}

/// Send a Completion to the OpenAI API
/// - Parameters:
/// - prompt: The Text Prompt
/// - model: The AI Model to Use. Set to `OpenAICompletionModelType.gpt3(.davinci)` by default which is the most capable model
/// - maxTokens: The limit character for the returned response, defaults to 16 as per the API
/// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// - body: Body of chat completion request
/// - Returns: Returns an OpenAICompletionResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
Expand Down Expand Up @@ -170,9 +185,7 @@ public final class OpenAIService {

/// Send a Edit request to the OpenAI API
/// - Parameters:
/// - input: The input text to use as a starting point for the edit.
/// - model: The AI Model to Use. Set to `OpenAIEditsModelType.feature(.davinci)` by default which is the most capable model
/// - instruction: The instruction that tells the model how to edit the prompt.
/// - body: Body of the edits request
/// - Returns: Returns an OpenAIEditsResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
Expand All @@ -194,9 +207,7 @@ public final class OpenAIService {

/// Send a Image generation request to the OpenAI API
/// - Parameters:
/// - prompt: A text description of the desired image(s). The maximum length is 1000 characters.
/// - imageSize: Size of expected image to Use. Set to `OpenAIGenerationImageSize.large` by default.
/// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// - body: Body of the image generation request
/// - Returns: Returns an OpenAIGenerationImageResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
Expand All @@ -215,4 +226,26 @@ public final class OpenAIService {
}
}
}

/// Send an Image edits request to the OpenAI API (async/await variant).
/// - Parameters:
///   - body: Body of the image edits request
///   - networkQueue: Queue the network call is dispatched on
///   - responseQueue: Queue the underlying completion handler is invoked on
/// - Returns: Returns an OpenAIGenerationImageResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendImageEdits(
with body: OpenAIImageEditsBody,
networkQueue: DispatchQueue = .global(qos: .background),
responseQueue: DispatchQueue = .main
) async throws -> OpenAIGenerationImageResponse {
// Bridge the completion-handler API into async/await; the continuation
// is resumed exactly once by the single completion callback.
return try await withCheckedThrowingContinuation { continuation in
sendImageEdits(
with: body,
networkQueue: networkQueue,
responseQueue: responseQueue
) { result in
continuation.resume(with: result)
}
}
}
}

0 comments on commit c40a26e

Please sign in to comment.