diff --git a/Demo/Demo.xcodeproj/project.pbxproj b/Demo/Demo.xcodeproj/project.pbxproj index 1652a8e..265518c 100644 --- a/Demo/Demo.xcodeproj/project.pbxproj +++ b/Demo/Demo.xcodeproj/project.pbxproj @@ -30,6 +30,16 @@ F442DD342B8119A20032682E /* VisionViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = F442DD332B8119A20032682E /* VisionViewModel.swift */; }; F44D5A1D2BF55D5D007B2D3F /* CreateTranslationView.swift in Sources */ = {isa = PBXBuildFile; fileRef = F44D5A1C2BF55D5D007B2D3F /* CreateTranslationView.swift */; }; F44D5A1F2BF55D7D007B2D3F /* CreateTranslationViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = F44D5A1E2BF55D7D007B2D3F /* CreateTranslationViewModel.swift */; }; + F4AA12212BF72F6100F2B6BB /* EditImageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4AA12202BF72F6100F2B6BB /* EditImageView.swift */; }; + F4AA12232BF72F8900F2B6BB /* EditImageViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4AA12222BF72F8900F2B6BB /* EditImageViewModel.swift */; }; + F4EAB7B62BF7B8D700ECA18F /* Line.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7B52BF7B8D700ECA18F /* Line.swift */; }; + F4EAB7B82BF7B8E000ECA18F /* Point.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7B72BF7B8E000ECA18F /* Point.swift */; }; + F4EAB7BA2BF7B8EE00ECA18F /* SwiftBetaCanvas.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7B92BF7B8EE00ECA18F /* SwiftBetaCanvas.swift */; }; + F4EAB7BC2BF7B8FE00ECA18F /* View+ReverseMask.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7BB2BF7B8FE00ECA18F /* View+ReverseMask.swift */; }; + F4EAB7C12BF7B93E00ECA18F /* GalleryView.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7C02BF7B93E00ECA18F /* GalleryView.swift */; }; + F4EAB7C32BF7B94C00ECA18F /* CameraView.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7C22BF7B94C00ECA18F /* CameraView.swift */; }; + F4EAB7C62BF7F47C00ECA18F /* VariationImageView.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7C52BF7F47C00ECA18F /* VariationImageView.swift */; }; + F4EAB7C82BF7F48800ECA18F /* VariationImageViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = F4EAB7C72BF7F48800ECA18F /* VariationImageViewModel.swift */; }; /* End PBXBuildFile section */ /* Begin PBXFileReference section */ @@ -56,6 +66,16 @@ F442DD332B8119A20032682E /* VisionViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = VisionViewModel.swift; sourceTree = ""; }; F44D5A1C2BF55D5D007B2D3F /* CreateTranslationView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CreateTranslationView.swift; sourceTree = ""; }; F44D5A1E2BF55D7D007B2D3F /* CreateTranslationViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CreateTranslationViewModel.swift; sourceTree = ""; }; + F4AA12202BF72F6100F2B6BB /* EditImageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EditImageView.swift; sourceTree = ""; }; + F4AA12222BF72F8900F2B6BB /* EditImageViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EditImageViewModel.swift; sourceTree = ""; }; + F4EAB7B52BF7B8D700ECA18F /* Line.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Line.swift; sourceTree = ""; }; + F4EAB7B72BF7B8E000ECA18F /* Point.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Point.swift; sourceTree = ""; }; + F4EAB7B92BF7B8EE00ECA18F /* 
SwiftBetaCanvas.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SwiftBetaCanvas.swift; sourceTree = ""; }; + F4EAB7BB2BF7B8FE00ECA18F /* View+ReverseMask.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "View+ReverseMask.swift"; sourceTree = ""; }; + F4EAB7C02BF7B93E00ECA18F /* GalleryView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GalleryView.swift; sourceTree = ""; }; + F4EAB7C22BF7B94C00ECA18F /* CameraView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CameraView.swift; sourceTree = ""; }; + F4EAB7C52BF7F47C00ECA18F /* VariationImageView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = VariationImageView.swift; sourceTree = ""; }; + F4EAB7C72BF7F48800ECA18F /* VariationImageViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = VariationImageViewModel.swift; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -92,10 +112,8 @@ F442DCF62B80A2B20032682E /* DemoApp.swift */, F442DCF82B80A2B20032682E /* ContentView.swift */, F442DD092B80A3D10032682E /* ChatCompletions */, - F442DD1C2B80B74E0032682E /* CreateImages */, - F442DD262B8110C10032682E /* CreateAudio */, - F442DD2B2B8113B40032682E /* CreateTranscription */, - F44D5A1B2BF55D50007B2D3F /* CreateTranslation */, + F4AA121F2BF72EB300F2B6BB /* Image */, + F4AA121E2BF72EAB00F2B6BB /* Audio */, F442DD302B8119770032682E /* Vision */, F442DCFA2B80A2B30032682E /* Assets.xcassets */, F442DD1A2B80A5990032682E /* SwiftOpenAI.plist */, @@ -195,6 +213,58 @@ path = CreateTranslation; sourceTree = ""; }; + F4AA121D2BF72E9600F2B6BB /* EditImage */ = { + isa = PBXGroup; + children = ( + F4AA12202BF72F6100F2B6BB /* EditImageView.swift */, + F4AA12222BF72F8900F2B6BB /* EditImageViewModel.swift */, + F4EAB7B42BF7B8CC00ECA18F /* Dependencies */, + ); + path = EditImage; + sourceTree = ""; + }; + F4AA121E2BF72EAB00F2B6BB /* Audio */ = { + isa = PBXGroup; + children = ( + F442DD262B8110C10032682E /* CreateAudio */, + F442DD2B2B8113B40032682E /* CreateTranscription */, + F44D5A1B2BF55D50007B2D3F /* CreateTranslation */, + ); + path = Audio; + sourceTree = ""; + }; + F4AA121F2BF72EB300F2B6BB /* Image */ = { + isa = PBXGroup; + children = ( + F4EAB7C42BF7F47000ECA18F /* VariationImage */, + F442DD1C2B80B74E0032682E /* CreateImages */, + F4AA121D2BF72E9600F2B6BB /* EditImage */, + ); + path = Image; + sourceTree = ""; + }; + F4EAB7B42BF7B8CC00ECA18F /* Dependencies */ = { + isa = PBXGroup; + children = ( + F4EAB7B52BF7B8D700ECA18F /* Line.swift */, + F4EAB7B72BF7B8E000ECA18F /* Point.swift */, + F4EAB7B92BF7B8EE00ECA18F /* SwiftBetaCanvas.swift */, + F4EAB7BB2BF7B8FE00ECA18F /* View+ReverseMask.swift */, + F4EAB7C02BF7B93E00ECA18F /* GalleryView.swift */, + F4EAB7C22BF7B94C00ECA18F /* CameraView.swift */, + ); + path = Dependencies; + sourceTree = ""; + }; + F4EAB7C42BF7F47000ECA18F /* VariationImage */ = { + isa = PBXGroup; + children = ( + F4EAB7C52BF7F47C00ECA18F /* VariationImageView.swift */, + F4EAB7C72BF7F48800ECA18F /* VariationImageViewModel.swift */, + ); + path = VariationImage; + sourceTree = ""; + }; /* End PBXGroup section */ /* Begin PBXNativeTarget section */ @@ -272,25 +342,35 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + F4AA12232BF72F8900F2B6BB /* EditImageViewModel.swift in Sources */, F442DD2D2B8114330032682E /* CreateTranscriptView.swift in Sources */, + 
F4EAB7C62BF7F47C00ECA18F /* VariationImageView.swift in Sources */, F442DD202B80B7810032682E /* CreateImagesView.swift in Sources */, F44D5A1F2BF55D7D007B2D3F /* CreateTranslationViewModel.swift in Sources */, F442DD0B2B80A3E80032682E /* ChatView.swift in Sources */, F442DD0F2B80A4330032682E /* TextMessageView.swift in Sources */, F44D5A1D2BF55D5D007B2D3F /* CreateTranslationView.swift in Sources */, + F4AA12212BF72F6100F2B6BB /* EditImageView.swift in Sources */, F442DD232B80B79C0032682E /* LoadingView.swift in Sources */, + F4EAB7B62BF7B8D700ECA18F /* Line.swift in Sources */, F442DD162B80A4E40032682E /* ChatCompletionsViewModel.swift in Sources */, + F4EAB7BC2BF7B8FE00ECA18F /* View+ReverseMask.swift in Sources */, F442DD2F2B8114450032682E /* CreateTranscriptViewModel.swift in Sources */, F442DCF92B80A2B20032682E /* ContentView.swift in Sources */, + F4EAB7C82BF7F48800ECA18F /* VariationImageViewModel.swift in Sources */, F442DD192B80A57D0032682E /* Bundle+OpenAIAPIKey.swift in Sources */, F442DD282B8110FA0032682E /* CreateAudioView.swift in Sources */, F442DD132B80A4760032682E /* TypingIndicatorView.swift in Sources */, + F4EAB7C12BF7B93E00ECA18F /* GalleryView.swift in Sources */, F442DD252B80B7C70032682E /* CreateImageViewModel.swift in Sources */, F442DCF72B80A2B20032682E /* DemoApp.swift in Sources */, F442DD2A2B8111190032682E /* CreateAudioViewModel.swift in Sources */, + F4EAB7C32BF7B94C00ECA18F /* CameraView.swift in Sources */, F442DD322B8119900032682E /* VisionView.swift in Sources */, F442DD342B8119A20032682E /* VisionViewModel.swift in Sources */, + F4EAB7BA2BF7B8EE00ECA18F /* SwiftBetaCanvas.swift in Sources */, F442DD0D2B80A4230032682E /* ConversationView.swift in Sources */, + F4EAB7B82BF7B8E000ECA18F /* Point.swift in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -427,6 +507,7 @@ DEVELOPMENT_TEAM = K4FV5B8ZC4; ENABLE_PREVIEWS = YES; GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_NSCameraUsageDescription = "We need access to your camera so you can take photos."; INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; INFOPLIST_KEY_UILaunchScreen_Generation = YES; @@ -456,6 +537,7 @@ DEVELOPMENT_TEAM = K4FV5B8ZC4; ENABLE_PREVIEWS = YES; GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_NSCameraUsageDescription = "We need access to your camera so you can take photos."; INFOPLIST_KEY_UIApplicationSceneManifest_Generation = YES; INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES; INFOPLIST_KEY_UILaunchScreen_Generation = YES; diff --git a/Demo/Demo/CreateAudio/CreateAudioView.swift b/Demo/Demo/Audio/CreateAudio/CreateAudioView.swift similarity index 100% rename from Demo/Demo/CreateAudio/CreateAudioView.swift rename to Demo/Demo/Audio/CreateAudio/CreateAudioView.swift diff --git a/Demo/Demo/CreateAudio/CreateAudioViewModel.swift b/Demo/Demo/Audio/CreateAudio/CreateAudioViewModel.swift similarity index 100% rename from Demo/Demo/CreateAudio/CreateAudioViewModel.swift rename to Demo/Demo/Audio/CreateAudio/CreateAudioViewModel.swift diff --git a/Demo/Demo/CreateTranscription/CreateTranscriptView.swift b/Demo/Demo/Audio/CreateTranscription/CreateTranscriptView.swift similarity index 100% rename from Demo/Demo/CreateTranscription/CreateTranscriptView.swift rename to Demo/Demo/Audio/CreateTranscription/CreateTranscriptView.swift diff --git a/Demo/Demo/CreateTranscription/CreateTranscriptViewModel.swift b/Demo/Demo/Audio/CreateTranscription/CreateTranscriptViewModel.swift similarity index 100% 
rename from Demo/Demo/CreateTranscription/CreateTranscriptViewModel.swift rename to Demo/Demo/Audio/CreateTranscription/CreateTranscriptViewModel.swift diff --git a/Demo/Demo/CreateTranslation/CreateTranslationView.swift b/Demo/Demo/Audio/CreateTranslation/CreateTranslationView.swift similarity index 100% rename from Demo/Demo/CreateTranslation/CreateTranslationView.swift rename to Demo/Demo/Audio/CreateTranslation/CreateTranslationView.swift diff --git a/Demo/Demo/CreateTranslation/CreateTranslationViewModel.swift b/Demo/Demo/Audio/CreateTranslation/CreateTranslationViewModel.swift similarity index 100% rename from Demo/Demo/CreateTranslation/CreateTranslationViewModel.swift rename to Demo/Demo/Audio/CreateTranslation/CreateTranslationViewModel.swift diff --git a/Demo/Demo/ContentView.swift b/Demo/Demo/ContentView.swift index 011a065..6289bf5 100644 --- a/Demo/Demo/ContentView.swift +++ b/Demo/Demo/ContentView.swift @@ -4,6 +4,8 @@ import SwiftOpenAI struct ContentView: View { @State var chatCompletionsViewModel: ChatCompletionsViewModel = .init() @State var createImagesViewModel: CreateImageViewModel = .init() + @State var editImageViewModel: EditImageViewModel = .init() + @State var variationImageViewModel: VariationImageViewModel = .init() @State var createAudioViewModel: CreateAudioViewModel = .init() @State var createTranscriptViewModel: CreateTranscriptViewModel = .init() @State var createTranslationViewModel: CreateTranslationViewModel = .init() @@ -52,6 +54,46 @@ struct ContentView: View { } } } + NavigationLink { + EditImageView(viewModel: editImageViewModel) + .navigationBarTitleDisplayMode(.large) + .navigationTitle("Edit Image") + } label: { + HStack { + Image(systemName: "photo.badge.checkmark.fill") + .foregroundStyle(.white) + .frame(width: 40, height: 40) + .padding(4) + .background(.pink.gradient) + .clipShape(RoundedRectangle(cornerRadius: 10)) + VStack(alignment: .leading) { + Text("Edit Image") + .font(.system(size: 18)) + .bold() + Text("Learn how to edit images with masks and prompts") + } + } + } + NavigationLink { + VariationImageView(viewModel: variationImageViewModel) + .navigationBarTitleDisplayMode(.large) + .navigationTitle("Variate Image") + } label: { + HStack { + Image(systemName: "die.face.6.fill") + .foregroundStyle(.white) + .frame(width: 40, height: 40) + .padding(4) + .background(.cyan.gradient) + .clipShape(RoundedRectangle(cornerRadius: 10)) + VStack(alignment: .leading) { + Text("Variation Image") + .font(.system(size: 18)) + .bold() + Text("Learn how to get a variation of images") + } + } + } NavigationLink { CreateAudioView(viewModel: createAudioViewModel) .navigationBarTitleDisplayMode(.large) diff --git a/Demo/Demo/CreateImages/CreateImageViewModel.swift b/Demo/Demo/Image/CreateImages/CreateImageViewModel.swift similarity index 100% rename from Demo/Demo/CreateImages/CreateImageViewModel.swift rename to Demo/Demo/Image/CreateImages/CreateImageViewModel.swift diff --git a/Demo/Demo/CreateImages/CreateImagesView.swift b/Demo/Demo/Image/CreateImages/CreateImagesView.swift similarity index 100% rename from Demo/Demo/CreateImages/CreateImagesView.swift rename to Demo/Demo/Image/CreateImages/CreateImagesView.swift diff --git a/Demo/Demo/CreateImages/Subviews/LoadingView.swift b/Demo/Demo/Image/CreateImages/Subviews/LoadingView.swift similarity index 100% rename from Demo/Demo/CreateImages/Subviews/LoadingView.swift rename to Demo/Demo/Image/CreateImages/Subviews/LoadingView.swift diff --git a/Demo/Demo/Image/EditImage/Dependencies/CameraView.swift 
b/Demo/Demo/Image/EditImage/Dependencies/CameraView.swift new file mode 100644 index 0000000..3f8f8e3 --- /dev/null +++ b/Demo/Demo/Image/EditImage/Dependencies/CameraView.swift @@ -0,0 +1,53 @@ +// +// CameraView.swift +// OpenAI +// +// Created by Home on 4/11/22. +// + +import Foundation +import UIKit +import SwiftUI + +public struct CameraView: UIViewControllerRepresentable { + @Binding var selectedImage: Image? + @Environment(\.dismiss) var dismiss + + public init(selectedImage: Binding<Image?>) { + self._selectedImage = selectedImage + } + + public func makeUIViewController(context: Context) -> some UIViewController { + let imagePickerController = UIImagePickerController() + imagePickerController.delegate = context.coordinator + imagePickerController.sourceType = .camera + imagePickerController.allowsEditing = true + imagePickerController.showsCameraControls = true + return imagePickerController + } + + public func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) { + // Empty + } + + public func makeCoordinator() -> Coordinator { + Coordinator(cameraView: self) + } +} + +final public class Coordinator: NSObject, UIImagePickerControllerDelegate, UINavigationControllerDelegate { + + var cameraView: CameraView + + init(cameraView: CameraView) { + self.cameraView = cameraView + } + + public func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) { + + if let image = info[UIImagePickerController.InfoKey.editedImage] as? UIImage { + cameraView.selectedImage = Image(uiImage: image) + } + cameraView.dismiss() + } +} diff --git a/Demo/Demo/Image/EditImage/Dependencies/GalleryView.swift b/Demo/Demo/Image/EditImage/Dependencies/GalleryView.swift new file mode 100644 index 0000000..5339572 --- /dev/null +++ b/Demo/Demo/Image/EditImage/Dependencies/GalleryView.swift @@ -0,0 +1,45 @@ +import Foundation +import UIKit +import SwiftUI + +public struct GalleryView: UIViewControllerRepresentable { + @Binding var selectedImage: Image? + @Environment(\.dismiss) var dismiss + + public init(selectedImage: Binding<Image?>) { + self._selectedImage = selectedImage + } + + public func makeUIViewController(context: Context) -> some UIViewController { + let imagePickerController = UIImagePickerController() + imagePickerController.delegate = context.coordinator + imagePickerController.sourceType = .photoLibrary + imagePickerController.allowsEditing = true + return imagePickerController + } + + public func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) { + // Empty + } + + public func makeCoordinator() -> GalleryCoordinator { + GalleryCoordinator(galleryView: self) + } +} + +final public class GalleryCoordinator: NSObject, UIImagePickerControllerDelegate, UINavigationControllerDelegate { + + var galleryView: GalleryView + + init(galleryView: GalleryView) { + self.galleryView = galleryView + } + + public func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) { + + if let image = info[UIImagePickerController.InfoKey.editedImage] as?
UIImage { + galleryView.selectedImage = Image(uiImage: image) + } + galleryView.dismiss() + } +} diff --git a/Demo/Demo/Image/EditImage/Dependencies/Line.swift b/Demo/Demo/Image/EditImage/Dependencies/Line.swift new file mode 100644 index 0000000..1be4ea1 --- /dev/null +++ b/Demo/Demo/Image/EditImage/Dependencies/Line.swift @@ -0,0 +1,14 @@ +import Foundation +import SwiftUI + +public struct Line { + var points: [Point] + var color: Color + var width: Float + + public init(points: [Point], color: Color, width: Float) { + self.points = points + self.color = color + self.width = width + } +} diff --git a/Demo/Demo/Image/EditImage/Dependencies/Point.swift b/Demo/Demo/Image/EditImage/Dependencies/Point.swift new file mode 100644 index 0000000..6d2d7d8 --- /dev/null +++ b/Demo/Demo/Image/EditImage/Dependencies/Point.swift @@ -0,0 +1,11 @@ +import Foundation + +public struct Point { + let currentPoint: CGPoint + let lastPoint: CGPoint + + public init(currentPoint: CGPoint, lastPoint: CGPoint) { + self.currentPoint = currentPoint + self.lastPoint = lastPoint + } +} diff --git a/Demo/Demo/Image/EditImage/Dependencies/SwiftBetaCanvas.swift b/Demo/Demo/Image/EditImage/Dependencies/SwiftBetaCanvas.swift new file mode 100644 index 0000000..6a67adf --- /dev/null +++ b/Demo/Demo/Image/EditImage/Dependencies/SwiftBetaCanvas.swift @@ -0,0 +1,81 @@ +import SwiftUI + +public struct SwiftBetaCanvas: View { + @Binding var lines: [Line] + @State var points: [Point] = [] + @State var currentLine: Int = 0 + @State var currentLineColor: Color = .red + var currentLineWidth: Float + + public init(lines: Binding<[Line]>, + currentLineWidth: Float) { + self._lines = lines + self.currentLineWidth = currentLineWidth + } + + public var body: some View { + Canvas { context, _ in + createNewPath(context: context, lines: lines) + } + .gesture( + DragGesture() + .onChanged({ value in + let point = value.location + let lastPoint = points.isEmpty ? point : points.last!.currentPoint + let currentLinePoints = Point(currentPoint: point, lastPoint: lastPoint) + points.append(currentLinePoints) + + if lines.isEmpty { + let line = Line(points: [currentLinePoints], + color: currentLineColor, + width: currentLineWidth) + lines.append(line) + } else { + var line: Line? + + if currentLine >= lines.count { + line = Line(points: [currentLinePoints], + color: currentLineColor, + width: currentLineWidth) + lines.append(line!) + } else { + line = lines[currentLine] + line?.points = points + line?.color = currentLineColor + } + + if currentLine < lines.count { + lines[currentLine] = line! 
+ } + } + }) + .onEnded({ value in + currentLine += 1 + points.removeAll() + }) + ) + .background(Color.clear) + .frame(width: 400, height: 400) + } + + private func createNewPath(context: GraphicsContext, + lines: [Line]) { + + guard !lines.isEmpty else { return } + + for line in lines { + var newPath = Path() + for point in line.points { + newPath.move(to: point.lastPoint) + newPath.addLine(to: point.currentPoint) + } + context.stroke(newPath, with: .color(line.color), style: .init(lineWidth: CGFloat(line.width), lineCap: .round, lineJoin: .round)) + } + } +} + +struct SwiftBetaCanvas_Previews: PreviewProvider { + static var previews: some View { + SwiftBetaCanvas(lines: .constant([Line]()), currentLineWidth: 16) + } +} diff --git a/Demo/Demo/Image/EditImage/Dependencies/View+ReverseMask.swift b/Demo/Demo/Image/EditImage/Dependencies/View+ReverseMask.swift new file mode 100644 index 0000000..0f4e48d --- /dev/null +++ b/Demo/Demo/Image/EditImage/Dependencies/View+ReverseMask.swift @@ -0,0 +1,14 @@ +import Foundation +import SwiftUI + +extension View { + public func reverseMask(@ViewBuilder _ mask: () -> Mask) -> some View { + self.mask { + Rectangle() + .overlay(alignment: .center) { + mask() + .blendMode(.destinationOut) + } + } + } +} diff --git a/Demo/Demo/Image/EditImage/EditImageView.swift b/Demo/Demo/Image/EditImage/EditImageView.swift new file mode 100644 index 0000000..56d196c --- /dev/null +++ b/Demo/Demo/Image/EditImage/EditImageView.swift @@ -0,0 +1,123 @@ +import SwiftUI + +struct EditImageView: View { + var viewModel = EditImageViewModel() + @State var text = "" + @State var selectedImage: Image? + @State var emptyImage: Image = Image(systemName: "photo.on.rectangle.angled") + @State var showCamera: Bool = false + @State var showGallery: Bool = false + @State var lines: [Line] = [] + @FocusState var isFocused: Bool + + var currentImage: some View { + if let selectedImage { + return selectedImage + .resizable() + .scaledToFill() + .frame(width: 300, height: 300) + } else { + return emptyImage + .resizable() + .scaledToFill() + .frame(width: 40, height: 40) + } + } + + var body: some View { + Form { + Text("Create a mask") + .font(.headline) + .padding(.vertical, 12) + + AsyncImage(url: viewModel.imageURL) { image in + image + .resizable() + .scaledToFit() + } placeholder: { + VStack { + if !viewModel.isLoading { + ZStack { + currentImage + SwiftBetaCanvas(lines: $lines, currentLineWidth: 30) + } + } else { + HStack { + Spacer() + VStack { + ProgressView() + .padding(.bottom, 12) + Text("Your image is being generated, please wait 5 seconds! 
πŸš€") + .multilineTextAlignment(.center) + } + Spacer() + } + + } + } + .frame(width: 300, height: 300) + } + + HStack { + Button { + showCamera.toggle() + } label: { + Text("πŸ“· Take a photo!") + } + .tint(.orange) + .buttonStyle(.borderedProminent) + .fullScreenCover(isPresented: $showCamera) { + CameraView(selectedImage: $selectedImage) + } + .padding(.vertical, 12) + + Spacer() + + Button { + showGallery.toggle() + } label: { + Text("Open Gallery") + } + .tint(.purple) + .buttonStyle(.borderedProminent) + .fullScreenCover(isPresented: $showGallery) { + GalleryView(selectedImage: $selectedImage) + } + .padding(.vertical, 12) + } + + TextField("Add a text and the AI will edit the image", + text: $text, + axis: .vertical) + .lineLimit(10) + .lineSpacing(5) + + HStack { + Spacer() + Button("πŸͺ„ Generate Image") { + isFocused = false + let selectedImageRenderer = ImageRenderer(content: currentImage) + let maskRenderer = ImageRenderer(content: currentImage.reverseMask { SwiftBetaCanvas(lines: $lines, currentLineWidth: 30) }) + + Task { + guard let selecteduiImage = selectedImageRenderer.uiImage, + let selectedPNGData = selecteduiImage.pngData(), + let maskuiImage = maskRenderer.uiImage, + let maskPNGData = maskuiImage.pngData() else { + return + } + + await viewModel.editImage(prompt: text, imageMask: selectedPNGData, maskData: maskPNGData) + } + } + .buttonStyle(.borderedProminent) + .disabled(viewModel.isLoading) + } + .padding(.vertical, 12) + } + } +} + +#Preview { + EditImageView() +} diff --git a/Demo/Demo/Image/EditImage/EditImageViewModel.swift b/Demo/Demo/Image/EditImage/EditImageViewModel.swift new file mode 100644 index 0000000..fa16661 --- /dev/null +++ b/Demo/Demo/Image/EditImage/EditImageViewModel.swift @@ -0,0 +1,29 @@ +import Foundation +import SwiftOpenAI + +@Observable +class EditImageViewModel { + private let openAI = SwiftOpenAI(apiKey: Bundle.main.getOpenAIApiKey()!) + var imageURL: URL? + var isLoading: Bool = false + + @MainActor + func editImage(prompt: String, imageMask: Data, maskData: Data) async { + isLoading = true + + do { + let editedImage = try await openAI.editImage(model: .dalle(.dalle2), imageData: imageMask, maskData: maskData, prompt: prompt, numberOfImages: 1, size: .s512) + await MainActor.run { + guard let editedImage, let urlString = editedImage.data.map({ $0.url }).last else { + isLoading = false + return + } + imageURL = URL(string: urlString) + isLoading = false + } + } catch { + isLoading = false + print("Error creating edit image", error.localizedDescription) + } + } +} diff --git a/Demo/Demo/Image/VariationImage/VariationImageView.swift b/Demo/Demo/Image/VariationImage/VariationImageView.swift new file mode 100644 index 0000000..345001b --- /dev/null +++ b/Demo/Demo/Image/VariationImage/VariationImageView.swift @@ -0,0 +1,108 @@ +import SwiftUI + +struct VariationImageView: View { + var viewModel = VariationImageViewModel() + @State var selectedImage: Image? 
+ @State var emptyImage: Image = Image(systemName: "photo.on.rectangle.angled") + @State var showCamera: Bool = false + @State var showGallery: Bool = false + + var currentImage: some View { + if let selectedImage { + return selectedImage + .resizable() + .scaledToFill() + .frame(width: 300, height: 300) + } else { + return emptyImage + .resizable() + .scaledToFill() + .frame(width: 40, height: 40) + } + } + + var body: some View { + Form { + Text("Create a variation of the selected image") + .font(.headline) + .padding(.vertical, 12) + + AsyncImage(url: viewModel.imageURL) { image in + image + .resizable() + .scaledToFit() + } placeholder: { + VStack { + if !viewModel.isLoading { + ZStack { + currentImage + } + } else { + HStack { + Spacer() + VStack { + ProgressView() + .padding(.bottom, 12) + Text("Your image is being generated, please wait 5 seconds! πŸš€") + .multilineTextAlignment(.center) + } + Spacer() + } + } + } + .frame(width: 300, height: 300) + } + + HStack { + Button { + showCamera.toggle() + } label: { + Text("πŸ“· Take a photo!") + } + .tint(.orange) + .buttonStyle(.borderedProminent) + .fullScreenCover(isPresented: $showCamera) { + CameraView(selectedImage: $selectedImage) + } + .padding(.vertical, 12) + + Spacer() + + Button { + showGallery.toggle() + } label: { + Text("Open Gallery") + } + .tint(.purple) + .buttonStyle(.borderedProminent) + .fullScreenCover(isPresented: $showGallery) { + GalleryView(selectedImage: $selectedImage) + } + .padding(.vertical, 12) + } + + HStack { + Spacer() + Button("πŸͺ„ Generate Image") { + let selectedImageRenderer = ImageRenderer(content: currentImage) + + Task { + guard let selecteduiImage = selectedImageRenderer.uiImage, + let selectedPNGData = selecteduiImage.pngData() else { + return + } + + await viewModel.variationImage(imageMask: selectedPNGData) + } + } + .buttonStyle(.borderedProminent) + .disabled(viewModel.isLoading) + } + .padding(.vertical, 12) + } + } +} + +#Preview { + VariationImageView() +} diff --git a/Demo/Demo/Image/VariationImage/VariationImageViewModel.swift b/Demo/Demo/Image/VariationImage/VariationImageViewModel.swift new file mode 100644 index 0000000..758dc3b --- /dev/null +++ b/Demo/Demo/Image/VariationImage/VariationImageViewModel.swift @@ -0,0 +1,30 @@ +import Foundation +import SwiftOpenAI + +@Observable +class VariationImageViewModel { + private let openAI = SwiftOpenAI(apiKey: Bundle.main.getOpenAIApiKey()!) + var imageURL: URL? 
+ var isLoading: Bool = false + + @MainActor + func variationImage(imageMask: Data) async { + isLoading = true + + do { + let variationImage = try await openAI.variationImage(model: .dalle(.dalle2), imageData: imageMask, numberOfImages: 1, size: .s512) + + await MainActor.run { + guard let variationImage, let urlString = variationImage.data.map({ $0.url }).last else { + isLoading = false + return + } + imageURL = URL(string: urlString) + isLoading = false + } + } catch { + isLoading = false + print("Error creating variation image", error.localizedDescription) + } + } +} diff --git a/README.md b/README.md index a9a009f..2197aa7 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,8 @@ This is a Swift community-driven repository for interfacing with the [OpenAI](ht - [Secure your API Key using a .plist](#secure-your-api-key-using-a-plist) - [Images](#images) - [Create Image](#create-image) + - [Edit Image](#edit-image) + - [Variation Image](#variation-image) - [Audio](#audio) - [Create Speech](#create-speech) - [Create Transcription](#create-transcription) @@ -140,6 +142,62 @@ do { } ``` +### [Edit Image](https://platform.openai.com/docs/api-reference/images/createVariation) +Creates an edited or extended image given an original image and a prompt. + +```swift +do { + // Attempt to edit an image using OpenAI's DALL-E 2 model. + let modelType = OpenAIImageModelType.dalle(.dalle2) // Specify the DALL-E 2 model. + let imageData = yourImageData // Binary data of the image to be edited. + let maskData = yourMaskData // Binary data of the mask to be applied. + let promptText = "A futuristic cityscape." // Describe the desired modifications. + let numberOfImages = 3 // Request multiple edited image variations. + let imageSize: ImageSize = .s1024 // Specify the size of the generated images. + + // Request the edited images and process them. + if let editedImages = try await openAI.editImage( + model: modelType, + imageData: imageData, + maskData: maskData, + prompt: promptText, + numberOfImages: numberOfImages, + size: imageSize + ) { + print("Received edited images: \(editedImages)") + } +} catch { + // Handle any errors that occur during the image editing process. + print("Error editing image: \(error)") +} +``` + +### [Variation Image](https://platform.openai.com/docs/api-reference/images/createVariation) +Generate variations of a provided image using a specific model with the OpenAI API, utilizing DALLΒ·E 2 + +```swift +do { + // Attempt to create image variations using OpenAI's DALL-E 2 model. + let modelType = OpenAIImageModelType.dalle(.dalle2) // Specify the DALL-E 2 model. + let imageData = yourImageData // Binary data of the original image to be varied. + let numberOfImages = 5 // Request multiple image variations. + let imageSize: ImageSize = .s1024 // Specify the size of the generated images. + + // Request the image variations and process them. + if let imageVariations = try await openAI.variationImage( + model: modelType, + imageData: imageData, + numberOfImages: numberOfImages, + size: imageSize + ) { + print("Received image variations: \(imageVariations)") + } +} catch { + // Handle any errors that occur during the image variation creation process. + print("Error generating image variations: \(error)") +} +``` + ## [Audio](https://platform.openai.com/docs/api-reference/audio) ### [Create Speech](https://platform.openai.com/docs/api-reference/audio/createSpeech) Generates audio from the input text. 
You can specify the voice and responseFormat diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateSpeechEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Audio/CreateSpeechEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateSpeechEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Audio/CreateSpeechEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateTranscriptionEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Audio/CreateTranscriptionEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateTranscriptionEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Audio/CreateTranscriptionEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateTranslationEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Audio/CreateTranslationEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateTranslationEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Audio/CreateTranslationEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ChatCompletionsEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Chat/ChatCompletionsEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ChatCompletionsEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Chat/ChatCompletionsEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ChatCompletionsImageInputEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Chat/ChatCompletionsImageInputEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ChatCompletionsImageInputEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Chat/ChatCompletionsImageInputEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CompletionsEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Chat/CompletionsEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CompletionsEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Chat/CompletionsEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/EmbeddingsEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Embeddings/EmbeddingsEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/EmbeddingsEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Embeddings/EmbeddingsEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateImageEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/CreateImageEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/CreateImageEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/CreateImageEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/EditImageEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/EditImageEndpoint.swift new file mode 100644 index 0000000..5a86b97 --- /dev/null +++ b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/EditImageEndpoint.swift @@ -0,0 +1,19 @@ +import Foundation + +struct EditImageEndpoint: Endpoint { + private let model: OpenAIImageModelType + + var 
method: HTTPMethod { + .POST + } + + var path: String = "images/edits" + + init(model: OpenAIImageModelType) { + self.model = model + } + + var parameters: [String: Any]? { + ["model": self.model.name as Any] + } +} diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/VariationImageEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/VariationImageEndpoint.swift new file mode 100644 index 0000000..b7ca4ff --- /dev/null +++ b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Image/VariationImageEndpoint.swift @@ -0,0 +1,19 @@ +import Foundation + +struct VariationImageEndpoint: Endpoint { + private let model: OpenAIImageModelType + + var method: HTTPMethod { + .POST + } + + var path: String = "images/variations" + + init(model: OpenAIImageModelType) { + self.model = model + } + + var parameters: [String: Any]? { + ["model": self.model.name as Any] + } +} diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ListModelsEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Models/ListModelsEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ListModelsEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Models/ListModelsEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ModerationEndpoint.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Moderation/ModerationEndpoint.swift similarity index 100% rename from Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/ModerationEndpoint.swift rename to Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/List/Moderation/ModerationEndpoint.swift diff --git a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/OpenAIEndpoints.swift b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/OpenAIEndpoints.swift index 9cf5042..ae38d71 100644 --- a/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/OpenAIEndpoints.swift +++ b/Sources/SwiftOpenAI/OpenAI/OpenAIEndpoints/OpenAIEndpoints.swift @@ -2,31 +2,16 @@ import Foundation enum OpenAIEndpoints { case listModels - - case completions(model: OpenAIModelType, - optionalParameters: CompletionsOptionalParameters?) - - case chatCompletions(model: OpenAIModelType, - messages: [MessageChatGPT], - optionalParameters: ChatCompletionsOptionalParameters?) - - case chatCompletionsWithImageInput(model: OpenAIModelType, - messages: [MessageChatImageInput], - optionalParameters: ChatCompletionsOptionalParameters?) - - case createImage(model: OpenAIImageModelType, - prompt: String, - numberOfImages: Int, - size: ImageSize) - + case completions(model: OpenAIModelType, optionalParameters: CompletionsOptionalParameters?) + case chatCompletions(model: OpenAIModelType, messages: [MessageChatGPT], optionalParameters: ChatCompletionsOptionalParameters?) + case chatCompletionsWithImageInput(model: OpenAIModelType, messages: [MessageChatImageInput], optionalParameters: ChatCompletionsOptionalParameters?) 
+ case createImage(model: OpenAIImageModelType, prompt: String, numberOfImages: Int, size: ImageSize) + case editImage(model: OpenAIImageModelType) + case variationImage(model: OpenAIImageModelType) case embeddings(model: OpenAIModelType, input: String) - case moderations(input: String) - case createSpeech(model: OpenAITTSModelType, input: String, voice: OpenAIVoiceType, responseFormat: OpenAIAudioResponseType, speed: Double) - case createTranscription(file: Data, model: OpenAITranscriptionModelType, language: String, prompt: String, responseFormat: OpenAIAudioResponseType, temperature: Double) - case createTranslation(file: Data, model: OpenAITranscriptionModelType, prompt: String, responseFormat: OpenAIAudioResponseType, temperature: Double) public var endpoint: Endpoint { @@ -74,6 +59,10 @@ enum OpenAIEndpoints { prompt: prompt, responseFormat: responseFormat, temperature: temperature) + case .editImage(model: let model): + return EditImageEndpoint(model: model) + case .variationImage(model: let model): + return VariationImageEndpoint(model: model) } } } diff --git a/Sources/SwiftOpenAI/OpenAI/Requests/Audio/CreateTranscriptionRequest.swift b/Sources/SwiftOpenAI/OpenAI/Requests/Audio/CreateTranscriptionRequest.swift index d58772c..1a8d8ca 100644 --- a/Sources/SwiftOpenAI/OpenAI/Requests/Audio/CreateTranscriptionRequest.swift +++ b/Sources/SwiftOpenAI/OpenAI/Requests/Audio/CreateTranscriptionRequest.swift @@ -44,28 +44,19 @@ final public class CreateTranscriptionRequest: NSObject, CreateTranscriptionRequ var endpoint = OpenAIEndpoints.createTranscription(file: file, model: model, language: language, prompt: prompt, responseFormat: responseFormat, temperature: temperature).endpoint api.routeEndpoint(&endpoint, environment: OpenAIEnvironmentV1()) - let boundary = UUID().uuidString + let boundary = "Boundary-\(UUID().uuidString)" var urlRequest = api.buildURLRequest(endpoint: endpoint) api.addHeaders(urlRequest: &urlRequest, headers: ["Content-Type": "multipart/form-data; boundary=\(boundary)", "Authorization": "Bearer \(apiKey)"]) - var body = Data() + let formData = MultipartFormData(boundary: boundary) + formData.appendField(name: "model", value: "whisper-1") + formData.appendImageData(fieldName: "file", data: file, filename: "steve.mp4", mimeType: "audio/mpeg") + formData.finalizeBody() - body.append("--\(boundary)\r\n".data(using: .utf8)!) - body.append("Content-Disposition: form-data; name=\"model\"\r\n\r\n".data(using: .utf8)!) - body.append("whisper-1\r\n".data(using: .utf8)!) - - body.append("--\(boundary)\r\n".data(using: .utf8)!) - body.append("Content-Disposition: form-data; name=\"file\"; filename=\"steve.mp4\"\r\n".data(using: .utf8)!) - body.append("Content-Type: audio/mpeg\r\n\r\n".data(using: .utf8)!) - body.append(file) - body.append("\r\n".data(using: .utf8)!) - - body.append("--\(boundary)--\r\n".data(using: .utf8)!) 
- - urlRequest.httpBody = body + urlRequest.httpBody = formData.getHttpBody() self.urlSession = URLSession(configuration: .default, delegate: self, diff --git a/Sources/SwiftOpenAI/OpenAI/Requests/Images/EditImageRequest.swift b/Sources/SwiftOpenAI/OpenAI/Requests/Images/EditImageRequest.swift new file mode 100644 index 0000000..efc7e92 --- /dev/null +++ b/Sources/SwiftOpenAI/OpenAI/Requests/Images/EditImageRequest.swift @@ -0,0 +1,63 @@ +import Foundation + +protocol EditImageRequestProtocol { + func execute(api: API, + apiKey: String, + model: OpenAIImageModelType, + imageData: Data, + maskData: Data, + prompt: String, + numberOfImages: Int, + size: ImageSize) async throws -> CreateImageDataModel? +} + +final public class EditImageRequest: NSObject, EditImageRequestProtocol { + public typealias Init = (_ api: API, + _ apiKey: String, + _ model: OpenAIImageModelType, + _ imageData: Data, + _ maskData: Data, + _ prompt: String, + _ numberOfImages: Int, + _ size: ImageSize) async throws -> CreateImageDataModel? + + public override init() { + super.init() + } + + public func execute(api: API, + apiKey: String, + model: OpenAIImageModelType, + imageData: Data, + maskData: Data, + prompt: String, + numberOfImages: Int, + size: ImageSize) async throws -> CreateImageDataModel? { + + var endpoint = OpenAIEndpoints.editImage(model: model).endpoint + api.routeEndpoint(&endpoint, environment: OpenAIEnvironmentV1()) + + let boundary = "Boundary-\(UUID().uuidString)" + + var urlRequest = api.buildURLRequest(endpoint: endpoint) + api.addHeaders(urlRequest: &urlRequest, + headers: ["Content-Type": "multipart/form-data; boundary=\(boundary)", + "Authorization": "Bearer \(apiKey)"]) + + let formData = MultipartFormData(boundary: boundary) + + formData.appendField(name: "prompt", value: prompt) + formData.appendField(name: "n", value: String(numberOfImages)) + formData.appendField(name: "size", value: size.rawValue) + formData.appendImageData(fieldName: "image", data: imageData, filename: "image.png", mimeType: "image/png") + formData.appendImageData(fieldName: "mask", data: maskData, filename: "mask.png", mimeType: "image/png") + formData.finalizeBody() + + urlRequest.httpBody = formData.getHttpBody() + + let (data, _) = try await URLSession.shared.data(for: urlRequest) + let variationImageDataModel = try JSONDecoder().decode(CreateImageDataModel.self, from: data) + + return variationImageDataModel + } +} diff --git a/Sources/SwiftOpenAI/OpenAI/Requests/Images/VariationImageRequest.swift b/Sources/SwiftOpenAI/OpenAI/Requests/Images/VariationImageRequest.swift new file mode 100644 index 0000000..ce8252a --- /dev/null +++ b/Sources/SwiftOpenAI/OpenAI/Requests/Images/VariationImageRequest.swift @@ -0,0 +1,53 @@ +import Foundation + +protocol VariationImageRequestProtocol { + func execute(api: API, + apiKey: String, + model: OpenAIImageModelType, + imageData: Data, + numberOfImages: Int, + size: ImageSize) async throws -> CreateImageDataModel? +} + +final public class VariationImageRequest: VariationImageRequestProtocol { + public typealias Init = (_ api: API, + _ apiKey: String, + _ model: OpenAIImageModelType, + _ imageData: Data, + _ numberOfImages: Int, + _ size: ImageSize) async throws -> CreateImageDataModel? + + public init() {} + + public func execute(api: API, + apiKey: String, + model: OpenAIImageModelType, + imageData: Data, + numberOfImages: Int, + size: ImageSize) async throws -> CreateImageDataModel? 
{ + + var endpoint = OpenAIEndpoints.variationImage(model: model).endpoint + api.routeEndpoint(&endpoint, environment: OpenAIEnvironmentV1()) + + let boundary = "Boundary-\(UUID().uuidString)" + + var urlRequest = api.buildURLRequest(endpoint: endpoint) + api.addHeaders(urlRequest: &urlRequest, + headers: ["Content-Type": "multipart/form-data; boundary=\(boundary)", + "Authorization": "Bearer \(apiKey)"]) + + let formData = MultipartFormData(boundary: boundary) + + formData.appendField(name: "n", value: String(numberOfImages)) + formData.appendField(name: "size", value: size.rawValue) + formData.appendImageData(fieldName: "image", data: imageData, filename: "image.png", mimeType: "image/png") + formData.finalizeBody() + + urlRequest.httpBody = formData.getHttpBody() + + let (data, _) = try await URLSession.shared.data(for: urlRequest) + let variationImageDataModel = try JSONDecoder().decode(CreateImageDataModel.self, from: data) + + return variationImageDataModel + } +} diff --git a/Sources/SwiftOpenAI/OpenAI/Requests/MultipartFormData.swift b/Sources/SwiftOpenAI/OpenAI/Requests/MultipartFormData.swift new file mode 100644 index 0000000..c5c0f04 --- /dev/null +++ b/Sources/SwiftOpenAI/OpenAI/Requests/MultipartFormData.swift @@ -0,0 +1,51 @@ +import Foundation + +final public class MultipartFormData { + private var body: Data = Data() + private let boundary: String + + public init(boundary: String) { + self.boundary = boundary + } + + public func appendField(name: String, value: String, filename: String? = nil, mimeType: String? = nil) { + var disposition = "Content-Disposition: form-data; name=\"\(name)\"" + if let filename = filename { + disposition += "; filename=\"\(filename)\"" + } + + append("--\(boundary)\r\n") + append("\(disposition)\r\n") + + if let mimeType = mimeType { + append("Content-Type: \(mimeType)\r\n\r\n") + } else { + append("\r\n") + } + + append(value) + append("\r\n") + } + + public func appendImageData(fieldName: String, data: Data, filename: String, mimeType: String) { + append("--\(boundary)\r\n") + append("Content-Disposition: form-data; name=\"\(fieldName)\"; filename=\"\(filename)\"\r\n") + append("Content-Type: \(mimeType)\r\n\r\n") + body.append(data) + append("\r\n") + } + + public func finalizeBody() { + append("--\(boundary)--\r\n") + } + + public func getHttpBody() -> Data { + return body + } + + private func append(_ string: String) { + if let data = string.data(using: .utf8) { + body.append(data) + } + } +} diff --git a/Sources/SwiftOpenAI/OpenAI/SwiftOpenAI.swift b/Sources/SwiftOpenAI/OpenAI/SwiftOpenAI.swift index 9745948..e1ebae3 100644 --- a/Sources/SwiftOpenAI/OpenAI/SwiftOpenAI.swift +++ b/Sources/SwiftOpenAI/OpenAI/SwiftOpenAI.swift @@ -2,35 +2,21 @@ import Foundation protocol OpenAIProtocol { func listModels() async throws -> ModelListDataModel? - - func completions(model: OpenAIModelType, - optionalParameters: CompletionsOptionalParameters?) async throws -> CompletionsDataModel? - - func createChatCompletions(model: OpenAIModelType, - messages: [MessageChatGPT], - optionalParameters: ChatCompletionsOptionalParameters?) async throws -> ChatCompletionsDataModel? - - func createChatCompletionsWithImageInput(model: OpenAIModelType, - messages: [MessageChatImageInput], - optionalParameters: ChatCompletionsOptionalParameters?) async throws -> ChatCompletionsDataModel? - - func createChatCompletionsStream(model: OpenAIModelType, - messages: [MessageChatGPT], - optionalParameters: ChatCompletionsOptionalParameters?) 
+ func completions(model: OpenAIModelType, optionalParameters: CompletionsOptionalParameters?) async throws -> CompletionsDataModel? + func createChatCompletions(model: OpenAIModelType, messages: [MessageChatGPT], optionalParameters: ChatCompletionsOptionalParameters?) async throws -> ChatCompletionsDataModel? + func createChatCompletionsWithImageInput(model: OpenAIModelType, messages: [MessageChatImageInput], optionalParameters: ChatCompletionsOptionalParameters?) + async throws -> ChatCompletionsDataModel? + func createChatCompletionsStream(model: OpenAIModelType, messages: [MessageChatGPT], optionalParameters: ChatCompletionsOptionalParameters?) async throws -> AsyncThrowingStream - func createImages(model: OpenAIImageModelType, prompt: String, numberOfImages: Int, size: ImageSize) async throws -> CreateImageDataModel? - + func editImage(model: OpenAIImageModelType, imageData: Data, maskData: Data, prompt: String, numberOfImages: Int, size: ImageSize) async throws -> CreateImageDataModel? + func variationImage(model: OpenAIImageModelType, imageData: Data, numberOfImages: Int, size: ImageSize) async throws -> CreateImageDataModel? func embeddings(model: OpenAIModelType, input: String) async throws -> EmbeddingResponseDataModel? - func moderations(input: String) async throws -> ModerationDataModel? - func createSpeech(model: OpenAITTSModelType, input: String, voice: OpenAIVoiceType, responseFormat: OpenAIAudioResponseType, speed: Double) async throws -> Data? - - func createTranscription(model: OpenAITranscriptionModelType, file: Data, language: String, prompt: String, responseFormat: OpenAIAudioResponseType, temperature: Double) async throws -> AsyncThrowingStream - + func createTranscription(model: OpenAITranscriptionModelType, file: Data, language: String, prompt: String, responseFormat: OpenAIAudioResponseType, temperature: Double) + async throws -> AsyncThrowingStream func createTranslation(model: OpenAITranscriptionModelType, file: Data, prompt: String, responseFormat: OpenAIAudioResponseType, temperature: Double) async throws -> AsyncThrowingStream - } // swiftlint:disable line_length @@ -44,6 +30,8 @@ public class SwiftOpenAI: OpenAIProtocol { private let createChatCompletionsImageInputRequest: CreateChatCompletionsImageInputRequest.Init private let createChatCompletionsStreamRequest: CreateChatCompletionsStreamRequest.Init private let createImagesRequest: CreateImagesRequest.Init + private let editImageRequest: EditImageRequest.Init + private let variationImageRequest: VariationImageRequest.Init private let embeddingsRequest: EmbeddingsRequest.Init private let moderationsRequest: ModerationsRequest.Init private let createSpeechRequest: CreateSpeechRequest.Init @@ -58,6 +46,8 @@ public class SwiftOpenAI: OpenAIProtocol { createChatCompletionsImageInputRequest: @escaping CreateChatCompletionsImageInputRequest.Init = CreateChatCompletionsImageInputRequest().execute, createChatCompletionsStreamRequest: @escaping CreateChatCompletionsStreamRequest.Init = CreateChatCompletionsStreamRequest().execute, createImagesRequest: @escaping CreateImagesRequest.Init = CreateImagesRequest().execute, + editImageRequest: @escaping EditImageRequest.Init = EditImageRequest().execute, + variationImageRequest: @escaping VariationImageRequest.Init = VariationImageRequest().execute, embeddingsRequest: @escaping EmbeddingsRequest.Init = EmbeddingsRequest().execute, moderationsRequest: @escaping ModerationsRequest.Init = ModerationsRequest().execute, createSpeechRequest: @escaping CreateSpeechRequest.Init = 
CreateSpeechRequest().execute, @@ -71,6 +61,8 @@ public class SwiftOpenAI: OpenAIProtocol { self.createChatCompletionsImageInputRequest = createChatCompletionsImageInputRequest self.createChatCompletionsStreamRequest = createChatCompletionsStreamRequest self.createImagesRequest = createImagesRequest + self.editImageRequest = editImageRequest + self.variationImageRequest = variationImageRequest self.embeddingsRequest = embeddingsRequest self.moderationsRequest = moderationsRequest self.createSpeechRequest = createSpeechRequest @@ -284,6 +276,80 @@ public class SwiftOpenAI: OpenAIProtocol { public func createImages(model: OpenAIImageModelType, prompt: String, numberOfImages: Int, size: ImageSize) async throws -> CreateImageDataModel? { try await createImagesRequest(api, apiKey, model, prompt, numberOfImages, size) } + + /** + Edits images based on a provided model, input image, mask, and textual prompt using the OpenAI API. + + This method leverages the OpenAI API to modify images according to a specific model, overlaying changes as directed by the input mask and guided by the textual prompt. The function supports multiple outputs, allowing the generation of several variations based on the number of images requested. + + - Parameters: + - model: An `OpenAIImageModelType` value representing the specific model to be used for image editing. + - imageData: A `Data` object containing the binary data of the image to be edited. + - maskData: A `Data` object containing the binary data of the mask to be applied to the image. + - prompt: A `String` that describes the desired modifications or thematic elements to be reflected in the edited image. + - numberOfImages: An `Int` indicating the number of edited image variations to be generated. + - size: An `ImageSize` value that specifies the resolution of the output images. + + - Throws: An error if the API call fails or if there is an issue with parsing the JSON response. + + - Returns: A `CreateImageDataModel` object containing the edited images. + + Example usage: + + let modelType = OpenAIImageModelType.dalle(.dalle2) + let imageData = yourImageData + let maskData = yourMaskData + let promptText = "A futuristic cityscape." + let numberOfImages = 3 + let imageSize: ImageSize = .s1024 + + do { + if let editedImages = try await editImage(model: modelType, imageData: imageData, maskData: maskData, prompt: promptText, numberOfImages: numberOfImages, size: imageSize) { + print("Received edited images: \(editedImages)") + } + } catch { + print("Error editing image: \(error)") + } + + */ + public func editImage(model: OpenAIImageModelType, imageData: Data, maskData: Data, prompt: String, numberOfImages: Int, size: ImageSize) async throws -> CreateImageDataModel? { + try await editImageRequest(api, apiKey, model, imageData, maskData, prompt, numberOfImages, size) + } + + /** + Generates variations of a provided image using a specified model with the OpenAI API. + + This method utilizes the OpenAI API to create multiple variations of a given image based on the specified model. It processes the input image data and generates the desired number of image variations, maintaining the specified resolution for the output images. + + - Parameters: + - model: An `OpenAIImageModelType` value representing the specific model to be used for generating image variations. + - imageData: A `Data` object containing the binary data of the original image to be varied. + - numberOfImages: An `Int` indicating the number of image variations to be generated.
+ - size: An `ImageSize` value that specifies the resolution of the output images. + + - Throws: An error if the API call fails or if there is an issue with parsing the JSON response. + + - Returns: A `CreateImageDataModel` object containing the image variations. + + Example usage: + + let modelType = OpenAIImageModelType.dalle(.dalle2) + let imageData = yourImageData + let numberOfImages = 5 + let imageSize: ImageSize = .s1024 + + do { + if let imageVariations = try await variationImage(model: modelType, imageData: imageData, numberOfImages: numberOfImages, size: imageSize) { + print("Received image variations: \(imageVariations)") + } + } catch { + print("Error generating image variations: \(error)") + } + + */ + public func variationImage(model: OpenAIImageModelType, imageData: Data, numberOfImages: Int, size: ImageSize) async throws -> CreateImageDataModel? { + try await variationImageRequest(api, apiKey, model, imageData, numberOfImages, size) + } /** Generates embeddings for a given input string using the specified OpenAI model.