VisualChatGPTでエラーが発生しました。直したいです。
手順通りにOpenAIのAPIキーをしっかり入力したにも関わらず、エラーが発生しました。
解決方法を教えて下さると助かります!!!
404 Client Error: Not Found for url:
The above exception was the direct cause of the following exception:
/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py:259 in │
│ hf_raise_for_status │
│ │
│ 256 │ </Tip> │
│ 257 │ """ │
│ 258 │ try: │
│ ❱ 259 │ │ response.raise_for_status() │
│ 260 │ except HTTPError as e: │
│ 261 │ │ error_code = response.headers.get("X-Error-Code") │
│ 262 │
│ │
│ /usr/local/lib/python3.10/dist-packages/requests/models.py:960 in raise_for_status │
│ │
│ 957 │ │ │ http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, rea │
│ 958 │ │ │
│ 959 │ │ if http_error_msg: │
│ ❱ 960 │ │ │ raise HTTPError(http_error_msg, response=self) │
│ 961 │ │
│ 962 │ def close(self): │
│ 963 │ │ """Releases the connection back to the pool. Once this method has been
404 Client Error: Not Found for url:
https://huggingface.co/api/models/runwayml/stable-diffusion-inpainting/revision/fp16 (Request ID:
Root=1-645a5937-1be37917605e0d8c03c20db9)
Sorry, we can't find the page you are looking for.
in <cell line: 6>:6 │
│ │
│ /content/TaskMatrix/visual_chatgpt.py:1262 in __init__ │
│ │
│ 1259 │ │ self.models = {} │
│ 1260 │ │ # Load Basic Foundation Models │
│ 1261 │ │ for class_name, device in load_dict.items(): │
│ ❱ 1262 │ │ │ self.models[class_name] = globals()[class_name](device=device) │
│ 1263 │ │ │
│ 1264 │ │ # Load Template Foundation Models │
│ 1265 │ │ for class_name, module in globals().items(): │
│ │
│ /content/TaskMatrix/visual_chatgpt.py:1059 in __init__ │
│ │
│ 1056 │ │ self.revision = 'fp16' if 'cuda' in self.device else None │
│ 1057 │ │ self.torch_dtype = torch.float16 if 'cuda' in self.device else torch.float32 │
│ 1058 │ │ │
│ ❱ 1059 │ │ self.inpaint = StableDiffusionInpaintPipeline.from_pretrained( │
│ 1060 │ │ │ "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype= │
│ 1061 │ def __call__(self, prompt, image, mask_image, height=512, width=512, num_inference_s │
│ 1062 │ │ update_image = self.inpaint(prompt=prompt, image=image.resize((width, height)), │
│ │
│ /usr/local/lib/python3.10/dist-packages/diffusers/pipelines/pipeline_utils.py:884 in │
│ from_pretrained │
│ │
│ 881 │ │ # 1. Download the checkpoints and configs │
│ 882 │ │ # use snapshot download here to get it working from from_pretrained │
│ 883 │ │ if not os.path.isdir(pretrained_model_name_or_path): │
│ ❱ 884 │ │ │ cached_folder = cls.download( │
│ 885 │ │ │ │ pretrained_model_name_or_path, │
│ 886 │ │ │ │ cache_dir=cache_dir, │
│ 887 │ │ │ │ resume_download=resume_download, │
│ │
│ /usr/local/lib/python3.10/dist-packages/diffusers/pipelines/pipeline_utils.py:1218 in download │
│ │
│ 1215 │ │ │ │ resume_download=resume_download, │
│ 1216 │ │ │ │ use_auth_token=use_auth_token, │
│ 1217 │ │ │ ) │
│ ❱ 1218 │ │ │ info = model_info( │
│ 1219 │ │ │ │ pretrained_model_name, │
│ 1220 │ │ │ │ use_auth_token=use_auth_token, │
│ 1221 │ │ │ │ revision=revision, │
│ │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py:120 in _inner_fn │
│ │
│ 117 │ │ if check_use_auth_token: │
│ 118 │ │ │ kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=ha │
│ 119 │ │ │
│ ❱ 120 │ │ return fn(*args, **kwargs) │
│ 121 │ │
│ 122 │ return _inner_fn # type: ignore │
│ 123 │
│ │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/hf_api.py:1604 in model_info │
│ │
│ 1601 │ │ if files_metadata: │
│ 1602 │ │ │ params["blobs"] = True │
│ 1603 │ │ r = get_session().get(path, headers=headers, timeout=timeout, params=params) │
│ ❱ 1604 │ │ hf_raise_for_status(r) │
│ 1605 │ │ d = r.json() │
│ 1606 │ │ return ModelInfo(**d) │
│ 1607 │
│ │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py:301 in │
│ hf_raise_for_status │
│ │
│ 298 │ │ │
│ 299 │ │ # Convert `HTTPError` into a `HfHubHTTPError` to display request information │
│ 300 │ │ # as well (request id and/or server error message) │
│ ❱ 301 │ │ raise HfHubHTTPError(str(e), response=response) from e │
│ 302 │
│ 303 │
│ 304 def _format_error_message(message: str, request_id: Optional[str], server_message: Optio
こちらのリンクです
https://colab.research.google.com/drive/1P3jJqKEWEaeNcZg8fODbbWeQ3gxOHk2-?usp=sharing
この記事通りに動かしました
https://dev.classmethod.jp/articles/tried-visual-chatgpt/#toc-7
一応、OpenAIのAPIキー取得ページのURLです
https://platform.openai.com/account/api-keys
本当にプログラミングがわからないです!優しく教えてくれたら助かります!!
0 likes