Skip to content

Commit

Permalink
Create julia examples.
Browse files Browse the repository at this point in the history
  • Loading branch information
inf3rnus committed Nov 8, 2024
1 parent b9a7865 commit e0f6101
Show file tree
Hide file tree
Showing 31 changed files with 1,053 additions and 0 deletions.
35 changes: 35 additions & 0 deletions examples/julia/tasks/audio_classification/audio_classification.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
using Bytez
using Base64
using HTTP


# Download the file at `url` and return its contents as a Base64 string.
function get_base64_audio(url::String)::String
    audio_bytes = HTTP.get(url).body
    return base64encode(audio_bytes)
end

# Fetch and Base64-encode the sample audio clip.
input_audio_base64 = get_base64_audio(
    "https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/audio-classification/audio.wav",
)

# Set up the API client.
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Select and load the audio-classification model.
model = client.model("aaraki/wav2vec2-base-finetuned-ks")
model.load()

# Classify the audio; it is sent as a Base64-encoded WAV buffer.
result = model.run(Dict("b64AudioBufferWav" => input_audio_base64))

# Each output entry pairs a class label with its confidence score.
for entry in result["output"]
    println("Score: $(entry["score"]), Label: $(entry["label"])")
end
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
using Bytez
using Base64
using HTTP

# Fetch `url` and return the response body Base64-encoded.
function get_base64_audio(url::String)::String
    resp = HTTP.get(url)
    encoded = base64encode(resp.body)
    return encoded
end

# Fetch and Base64-encode the sample speech clip.
input_audio_base64 = get_base64_audio(
    "https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/audio-classification/audio.wav",
)

# Initialize the Bytez client with your API key.
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Load a speech-recognition model.
model = client.model("facebook/data2vec-audio-base-960h")

model.load()

# Run inference; the audio is sent as a Base64-encoded WAV buffer.
result = model.run(Dict("b64AudioBufferWav" => input_audio_base64))

output = result["output"]

# depending on the model, there may be additional props returned
println(output)

text = output["text"]

println("Inference is: $text")

28 changes: 28 additions & 0 deletions examples/julia/tasks/chat_models/chat_models.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
using Bytez

# Set up the API client.
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Select and load an instruction-tuned chat model.
model = client.model("microsoft/Phi-3-mini-4k-instruct")
model.load()

# Conversation so far: a system prompt followed by one user turn.
chat_history = [
    Dict("role" => "system", "content" => "You are a friendly chatbot"),
    Dict("role" => "user", "content" => "What is the capital of England?"),
]

# Run the chat model, capping the response length.
result = model.run(chat_history, Dict("max_length" => 100))

# The first output item holds the full generated conversation.
conversation = result["output"][1]["generated_text"]

for turn in conversation
    # depending on the model, there may be additional props returned
    println(turn)

    println(Dict("content" => turn["content"], "role" => turn["role"]))
end
50 changes: 50 additions & 0 deletions examples/julia/tasks/depth_estimation/depth_estimation.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
using Bytez
using Base64
using HTTP
using Printf

# Directory this script lives in; output images are written next to it.
WORKING_DIR = dirname(@__FILE__)

client = Bytez.init("YOUR BYTEZ KEY HERE")

# Monocular depth-estimation model.
model = client.model("vinvino02/glpn-nyu")
model.load()

input_image_url = "https://as1.ftcdn.net/v2/jpg/03/03/55/82/1000_F_303558268_YNUQp9NNMTE0X4zrj314mbWcDHd1pZPD.jpg"

# Run the model with the input image
result = model.run(input_image_url)

output = result["output"]

# depending on the model, there may be additional props returned
depth_png = output["depth_png"]
formatted_predicted_depth_array = output["formatted_predicted_depth_array"]

##### Decode and save the image #####
image_path = joinpath(WORKING_DIR, "testImage.png")
write(image_path, base64decode(depth_png))

# Write the original image for comparison
original_image_path = joinpath(WORKING_DIR, "originalImage.jpg")
write(original_image_path, HTTP.get(input_image_url).body)

println("Wrote the original image to: ", original_image_path)
println("Wrote the inference image to: ", image_path)

##### 2D depth map, object representation of the pixel values for the depth map #####
for (row_index, depth_row) in enumerate(formatted_predicted_depth_array)
    for (col_index, pixel) in enumerate(depth_row)
        # insert code here if you need these values directly
    end
end
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
using Bytez
using Printf

# Initialize the Bytez client with your API key.
# (The original defined WORKING_DIR here, but this script never writes
# files, so that dead assignment has been removed.)
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Load a document visual-question-answering model.
model = client.model("cloudqi/CQI_Visual_Question_Awnser_PT_v0")

model.load()

# The model takes a document image plus a natural-language question about it.
input_data = Dict(
    "image" => "https://templates.invoicehome.com/invoice-template-us-neat-750px.png",
    "question" => "What's the total cost?",
)

result = model.run(input_data)

output = result["output"]

# depending on the model, there may be additional props returned
println(output)

# The best answer comes first; start/end are positions of the answer span.
output_object = output[1]

answer = output_object["answer"]
score = output_object["score"]
start = output_object["start"]
# end is a reserved keyword in julia
_end = output_object["end"]

println(Dict("answer" => answer, "score" => score, "start" => start, "end" => _end))
20 changes: 20 additions & 0 deletions examples/julia/tasks/feature_extraction/feature_extraction.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
using Bytez

client = Bytez.init("YOUR BYTEZ KEY HERE")

# Text-embedding model.
model = client.model("Salesforce/SFR-Embedding-2_R")
model.load()

input_text = "Your text for feature extraction goes here..."

result = model.run(input_text)

output = result["output"]

# depending on the model, there may be additional props returned
println(output)

# The first output entry is the embedding vector for the input text.
println(output[1])
25 changes: 25 additions & 0 deletions examples/julia/tasks/fill_mask/fill_mask.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
using Bytez

client = Bytez.init("YOUR BYTEZ KEY HERE")

# Masked-language model; predicts the <mask> token in the input.
model = client.model("almanach/camembert-base")
model.load()

input_text = "The capital of France is <mask>."

result = model.run(input_text)

# One candidate completion per output entry, ranked by score.
for candidate in result["output"]
    # depending on the model, there may be additional props returned
    println(candidate)

    println(Dict(
        "sequence" => candidate["sequence"],
        "score" => candidate["score"],
        "token" => candidate["token"],
        "token_str" => candidate["token_str"],
    ))
end
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
using Bytez

client = Bytez.init("YOUR BYTEZ KEY HERE")

# Function-calling model: given tool signatures, it emits the call to make.
model = client.model("Nexusflow/NexusRaven-V2-13B")

model.load()

input_text = "What's the weather like in Seattle right now?"

options = Dict(
    "params" => Dict(
        # Fix: min_new_tokens must not exceed max_new_tokens — the original
        # had max=20 with min=50, an unsatisfiable generation constraint.
        "max_new_tokens" => 50,
        "min_new_tokens" => 20,
        "temperature" => 0.001,
        "do_sample" => false),
    "stream" => true,
)

prompt_template = """
Function:
def get_weather_data(coordinates):
\"\"\"
Fetches weather data from the Open-Meteo API for the given latitude and longitude.
Args:
coordinates (tuple): The latitude and longitude of the location.
Returns:
float: The current temperature in the coordinates you've asked for
\"""
Function:
def get_coordinates_from_city(city_name):
\"""
Fetches the latitude and longitude of a given city name using the Maps.co Geocoding API.
Args:
city_name (str): The name of the city.
Returns:
tuple: The latitude and longitude of the city.
\"""
User Query: {query}<human_end>
"""

# Prepare the prompt with the user query
prompt = replace(prompt_template, "{query}" => input_text)

# With "stream" => true, run returns a Channel of generated tokens.
stream = model.run(prompt, options)

# Iterate the Channel directly: this yields items as they arrive and ends
# cleanly when the producer closes it. The original `while isopen(stream);
# take!(stream)` pattern races the close and can throw InvalidStateException
# when the channel closes between the isopen check and the take!.
for item in stream
    println(item)
end
23 changes: 23 additions & 0 deletions examples/julia/tasks/image_classification/image_classification.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
using Bytez

client = Bytez.init("YOUR BYTEZ KEY HERE")

# Image-classification model.
model = client.model("google/vit-base-patch16-224")
model.load()

input_image_url = "https://www.padoniavets.com/sites/default/files/field/image/cats-and-dogs.jpg"

result = model.run(input_image_url)

# One entry per predicted class, each with a label and confidence score.
for prediction in result["output"]
    # depending on the model, there may be additional props returned
    println(prediction)

    println(Dict("label" => prediction["label"], "score" => prediction["score"]))
end
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
using Bytez

client = Bytez.init("YOUR BYTEZ KEY HERE")

# Image-embedding model.
model = client.model("nomic-ai/nomic-embed-vision-v1")
model.load()

input_image_url = "https://as1.ftcdn.net/v2/jpg/03/03/55/82/1000_F_303558268_YNUQp9NNMTE0X4zrj314mbWcDHd1pZPD.jpg"

result = model.run(input_image_url)

output = result["output"]

# depending on the model, there may be additional props returned
println(output)

# The first output entry is the embedding vector for the image.
println(output[1])
34 changes: 34 additions & 0 deletions examples/julia/tasks/image_segmentation/image_segmentation.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
using Bytez
using Base64
using Printf

# Directory this script lives in; mask images are written next to it.
WORKING_DIR = dirname(@__FILE__)

client = Bytez.init("YOUR BYTEZ KEY HERE")

# Fashion-domain image-segmentation model.
model = client.model("sayeed99/segformer-b3-fashion")
model.load()

input_image_url = "https://ocean.si.edu/sites/default/files/styles/3_2_largest/public/2023-11/Screen_Shot_2018-04-16_at_1_42_56_PM.png.webp?itok=Icvi-ek9"

result = model.run(input_image_url)

# One entry per detected segment: a label, a score, and a Base64 PNG mask.
for (index, segment) in enumerate(result["output"])
    # depending on the model, there may be additional props returned
    println(segment)

    println(Dict("label" => segment["label"], "score" => segment["score"]))

    # Decode the mask and save it next to this script.
    decoded_mask = base64decode(segment["mask_png"])

    open("$(WORKING_DIR)/mask-$index.png", "w") do file
        write(file, decoded_mask)
    end
end
20 changes: 20 additions & 0 deletions examples/julia/tasks/image_to_text/image_to_text.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
using Bytez

client = Bytez.init("YOUR BYTEZ KEY HERE")

input_image_url = "https://as1.ftcdn.net/v2/jpg/03/03/55/82/1000_F_303558268_YNUQp9NNMTE0X4zrj314mbWcDHd1pZPD.jpg"

# Image-captioning model.
model = client.model("Salesforce/blip-image-captioning-base")
model.load()

result = model.run(input_image_url)

# depending on the model, there may be additional props returned
output = result["output"]

println(output)

# The first output entry carries the generated caption.
println(output[1]["generated_text"])
Loading

0 comments on commit e0f6101

Please sign in to comment.