Merge pull request #3328 from vladmandic/dev
merge
vladmandic authored Jul 10, 2024
2 parents 4a03368 + 8e170f5 commit 2ec6e9e
Showing 5 changed files with 33 additions and 29 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -67,6 +67,7 @@ This release is primary service release with cumulative fixes and several improv
 - fix control input type video
 - fix reset pipeline at the end of each iteration
 - fix faceswap when no faces detected
+- fix civitai search
 - multiple ModernUI fixes
 
 ## Update for 2024-06-23
15 changes: 7 additions & 8 deletions modules/model_kolors.py
@@ -1,26 +1,25 @@
 import torch
-import transformers
 import diffusers
 
 
 repo_id = 'Kwai-Kolors/Kolors'
-encoder_id = 'THUDM/chatglm3-6b'
 
 
 def load_kolors(_checkpoint_info, diffusers_load_config={}):
     from modules import shared, devices, modelloader
     modelloader.hf_login()
     diffusers_load_config['variant'] = "fp16"
     if 'torch_dtype' not in diffusers_load_config:
-        diffusers_load_config['torch_dtype'] = 'torch.float16'
+        diffusers_load_config['torch_dtype'] = torch.float16
 
-    text_encoder = transformers.AutoModel.from_pretrained(encoder_id, torch_dtype=torch.float16, trust_remote_code=True, cache_dir=shared.opts.diffusers_dir)
+    # import torch
+    # import transformers
+    # encoder_id = 'THUDM/chatglm3-6b'
+    # text_encoder = transformers.AutoModel.from_pretrained(encoder_id, torch_dtype=torch.float16, trust_remote_code=True, cache_dir=shared.opts.diffusers_dir)
     # text_encoder = transformers.AutoModel.from_pretrained("THUDM/chatglm3-6b", torch_dtype=torch.float16, trust_remote_code=True).quantize(4).cuda()
-    tokenizer = transformers.AutoTokenizer.from_pretrained(encoder_id, trust_remote_code=True, cache_dir=shared.opts.diffusers_dir)
-    pipe = diffusers.StableDiffusionXLPipeline.from_pretrained(
+    # tokenizer = transformers.AutoTokenizer.from_pretrained(encoder_id, trust_remote_code=True, cache_dir=shared.opts.diffusers_dir)
+    pipe = diffusers.KolorsPipeline.from_pretrained(
         repo_id,
-        tokenizer=tokenizer,
-        text_encoder=text_encoder,
         cache_dir = shared.opts.diffusers_dir,
         **diffusers_load_config,
     )
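
As context for the change above, a minimal standalone sketch of the new loading path. It assumes a diffusers build that ships KolorsPipeline, which constructs its ChatGLM text encoder and tokenizer internally (the reason the explicit transformers calls could be dropped); the device move, prompt, and output handling are illustrative, not part of the patch:

```python
import torch
import diffusers

# KolorsPipeline resolves its own ChatGLM text encoder and tokenizer from the
# repo, so only the repo id, variant, and dtype need to be supplied.
pipe = diffusers.KolorsPipeline.from_pretrained(
    'Kwai-Kolors/Kolors',       # repo_id used by the module above
    variant='fp16',
    torch_dtype=torch.float16,
)
pipe = pipe.to('cuda')          # illustrative; SD.Next routes devices via modules.devices
image = pipe(prompt='a watercolor fox in a forest').images[0]
image.save('kolors.png')
```
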
3 changes: 3 additions & 0 deletions modules/modelloader.py
@@ -101,6 +101,9 @@ def download_civit_model_thread(model_name, model_url, model_path, model_type, t
     elif model_type == 'Embedding':
         model_file = os.path.join(shared.opts.embeddings_dir, model_path, model_name)
         temp_file = os.path.join(shared.opts.embeddings_dir, model_path, temp_file)
+    elif model_type == 'VAE':
+        model_file = os.path.join(shared.opts.vae_dir, model_path, model_name)
+        temp_file = os.path.join(shared.opts.vae_dir, model_path, temp_file)
     else:
         model_file = os.path.join(shared.opts.ckpt_dir, model_path, model_name)
         temp_file = os.path.join(shared.opts.ckpt_dir, model_path, temp_file)
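
The new branch slots VAE downloads into the same per-type directory dispatch already used for embeddings and checkpoints. A condensed sketch of that mapping follows; the option names mirror the diff, while the helper itself is hypothetical:

```python
import os

def resolve_civit_paths(model_type: str, model_path: str, model_name: str, temp_name: str, opts):
    # Map the CivitAI model type to its target directory; checkpoints are the
    # fallback, matching the final else branch of download_civit_model_thread.
    type_dirs = {
        'Embedding': opts.embeddings_dir,
        'VAE': opts.vae_dir,    # the newly added case
    }
    base = type_dirs.get(model_type, opts.ckpt_dir)
    model_file = os.path.join(base, model_path, model_name)
    temp_file = os.path.join(base, model_path, temp_name)
    return model_file, temp_file
```
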
6 changes: 4 additions & 2 deletions modules/textual_inversion/textual_inversion.py
Expand Up @@ -121,7 +121,8 @@ def insert_tokens(embeddings: list, tokenizers: list):
"""
tokens = []
for embedding in embeddings:
tokens += embedding.tokens
if embedding is not None:
tokens += embedding.tokens
for tokenizer in tokenizers:
tokenizer.add_tokens(tokens)
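
The guard matters because a failed load can leave None entries in the embeddings list, and None.tokens would raise AttributeError before any tokenizer is updated. A self-contained illustration, with a hypothetical stand-in for the real Embedding class:

```python
from dataclasses import dataclass, field

@dataclass
class StubEmbedding:                # hypothetical stand-in for the real Embedding
    name: str
    tokens: list = field(default_factory=list)

embeddings = [StubEmbedding('style-a', ['<style-a>']), None]  # None marks a failed load
tokens = []
for embedding in embeddings:
    if embedding is not None:       # the new guard skips failed loads
        tokens += embedding.tokens
print(tokens)                       # ['<style-a>']
```
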

@@ -295,7 +296,8 @@ def load_diffusers_embedding(self, filename: Union[str, List[str]] = None, data:
                     (len(embedding.vector_sizes) < len(hiddensizes) and len(embedding.vector_sizes) != 2)): # SD3 no T5
                     embedding.tokens = []
                     self.skipped_embeddings[embedding.name] = embedding
-            except Exception:
+            except Exception as e:
+                shared.log.error(f'Embedding invalid: name="{embedding.name}" fn="{filename}" {e}')
                 self.skipped_embeddings[embedding.name] = embedding
         if overwrite:
             shared.log.info(f"Loading Bundled embeddings: {list(data.keys())}")
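
The second hunk keeps the skip behavior but binds the exception so the log records which file was rejected and why. A minimal sketch of that failure path, where log stands in for shared.log and the shape check is invented for illustration:

```python
import logging

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger('sd')       # stand-in for shared.log

class StubEmbedding:                # hypothetical minimal embedding record
    def __init__(self, name, vector_sizes):
        self.name = name
        self.vector_sizes = vector_sizes

skipped_embeddings = {}

def check_embedding(embedding, filename, hiddensizes):
    try:
        if len(embedding.vector_sizes) not in (2, len(hiddensizes)):  # invented shape check
            raise ValueError(f'vector sizes {embedding.vector_sizes} do not match {hiddensizes}')
    except Exception as e:
        log.error(f'Embedding invalid: name="{embedding.name}" fn="{filename}" {e}')
        skipped_embeddings[embedding.name] = embedding

check_embedding(StubEmbedding('bad-emb', [768]), 'bad-emb.safetensors', [768, 1280, 4096])
```
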
37 changes: 18 additions & 19 deletions modules/ui_models.py
@@ -418,41 +418,41 @@ def hf_download_model(hub_id: str, token, variant, revision, mirror, custom_pipe
 def civit_search_model(name, tag, model_type):
     # types = 'LORA' if model_type == 'LoRA' else 'Checkpoint'
     url = 'https://civitai.com/api/v1/models?limit=25&Sort=Newest'
-    if model_type == 'SD 1.5' or model_type == 'SD XL':
+    if model_type == 'Model':
         url += '&types=Checkpoint'
     elif model_type == 'LoRA':
-        url += '&types=LORA'
+        url += '&types=LORA&types=DoRA&types=LoCon'
     elif model_type == 'Embedding':
         url += '&types=TextualInversion'
+    elif model_type == 'VAE':
+        url += '&types=VAE'
     if name is not None and len(name) > 0:
         url += f'&query={name}'
     if tag is not None and len(tag) > 0:
         url += f'&tag={tag}'
     r = req(url)
+    log.debug(f'CivitAI search: name="{name}" tag={tag or "none"} url="{url}" status={r.status_code}')
     if r.status_code != 200:
         log.warning(f'CivitAI search: name="{name}" tag={tag} status={r.status_code}')
         return [], gr.update(visible=False, value=[]), gr.update(visible=False, value=None), gr.update(visible=False, value=None)
+    try:
+        body = r.json()
+    except Exception as e:
+        log.error(f'CivitAI search: name="{name}" tag={tag} {e}')
+        return [], gr.update(visible=False, value=[]), gr.update(visible=False, value=None), gr.update(visible=False, value=None)
-    body = r.json()
     nonlocal data
     data = body.get('items', [])
     data1 = []
     for model in data:
         found = 0
-        if model_type == 'LoRA' and model['type'] in ['LORA', 'LoCon']:
+        if model_type == 'LoRA' and model['type'].lower() in ['lora', 'locon', 'dora', 'lycoris']:
             found += 1
-        elif model_type == 'Embedding' and model['type'] == 'TextualInversion':
+        elif model_type == 'Embedding' and model['type'].lower() in ['textualinversion', 'embedding']:
             found += 1
+        elif model_type == 'Model' and model['type'].lower() in ['checkpoint']:
+            found += 1
+        elif model_type == 'VAE' and model['type'].lower() in ['vae']:
+            found += 1
-        elif model_type.startswith('SD') and model['type'] == 'Checkpoint':
-            for variant in model['modelVersions']:
-                if model_type == 'SD 1.5':
-                    if 'SD 1.' in variant['baseModel']:
-                        found += 1
-                if model_type == 'SD XL':
-                    if 'SDXL' in variant['baseModel']:
-                        found += 1
-                else:
-                    if 'SD 1.' not in variant['baseModel'] and 'SDXL' not in variant['baseModel']:
-                        found += 1
         elif model_type == 'Other':
             found += 1
         if found > 0:
@@ -464,8 +464,7 @@ def civit_search_model(name, tag, model_type):
                 model['stats']['rating']
             ])
     res = f'Search result: name={name} tag={tag or "none"} type={model_type} models={len(data1)}'
-    return res, gr.update(visible=len(data1) > 0, value=data1 if len(data1) > 0 else []), gr.update(
-        visible=False, value=None), gr.update(visible=False, value=None)
+    return res, gr.update(visible=len(data1) > 0, value=data1 if len(data1) > 0 else []), gr.update(visible=False, value=None), gr.update(visible=False, value=None)

 def civit_select1(evt: gr.SelectData, in_data):
     model_id = in_data[evt.index[0]][0]
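
Outside the UI, the widened query can be reproduced with plain requests: the repeated types parameters mirror how the patch extends the LoRA search to DoRA and LoCon, and the guarded r.json() mirrors the new error path. The keyword and timeout below are illustrative:

```python
import requests

url = 'https://civitai.com/api/v1/models?limit=25&Sort=Newest'
url += '&types=LORA&types=DoRA&types=LoCon'  # LoRA search now also matches DoRA/LoCon
url += '&query=watercolor'                   # illustrative keyword
r = requests.get(url, timeout=60)
if r.status_code == 200:
    try:
        body = r.json()                      # guarded, as in the patch
    except Exception:
        body = {}
    for model in body.get('items', []):
        print(model['id'], model['type'], model['name'])
```
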
@@ -596,7 +595,7 @@ def civit_search_metadata(rehash, title):
         gr.HTML('<h2>Search for models</h2>')
         with gr.Row():
             with gr.Column(scale=1):
-                civit_model_type = gr.Dropdown(label='Model type', choices=['SD 1.5', 'SD XL', 'LoRA', 'Embedding', 'Other'], value='LoRA')
+                civit_model_type = gr.Dropdown(label='Model type', choices=['Model', 'LoRA', 'Embedding', 'VAE', 'Other'], value='Model')
             with gr.Column(scale=15):
                 with gr.Row():
                     civit_search_text = gr.Textbox('', label='Search models', placeholder='keyword')
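
The new dropdown choices line up one-to-one with the branches in civit_search_model above. A compact, hypothetical sketch of the wiring, reduced to the two inputs shown in this hunk (the real UI passes more components and outputs):

```python
import gradio as gr

def search(name, model_type):               # hypothetical reduced handler
    return f'searching type={model_type} query={name}'

with gr.Blocks() as demo:
    civit_model_type = gr.Dropdown(label='Model type', choices=['Model', 'LoRA', 'Embedding', 'VAE', 'Other'], value='Model')
    civit_search_text = gr.Textbox('', label='Search models', placeholder='keyword')
    civit_results = gr.Textbox(label='Status')
    civit_search_text.submit(fn=search, inputs=[civit_search_text, civit_model_type], outputs=[civit_results])
```
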
