[Refact] Refactoring code & Add docstring
- Wrote descriptions for the API using docstrings.
- Refactored the code.
issue #91
sooya233 committed Jun 9, 2022
1 parent 11d5772 commit 660779d
Showing 7 changed files with 56 additions and 63 deletions.
41 changes: 11 additions & 30 deletions serving/backend/app/api/face.py
@@ -28,7 +28,6 @@ class People(BaseModel):
people_img: Dict[str, bytes]
file_name: str
message: Optional[str]
# created_at : datetime = Field(default_factory=datetime.now)


class ItemValue(BaseModel):
@@ -39,38 +38,29 @@ class ItemValue(BaseModel):
# changed to GET so the photos can be shown
@router.get("/show-people", description="Shows the images of the people extracted by face clustering.")
def show_people(id: UUID):
# insert the photos
"""API that shows the images of the people extracted by face clustering
Args:
    id (UUID): identifier for accessing the clustering result
Returns:
    Response(dict):
        id (str): identifier that distinguishes the video.
        people_img (dict): uploads each person's representative image to GCS and provides a URL for accessing it
"""

people_img = {}
result_path = os.path.join(FILE_DIR, str(id), 'result', 'result.npy')
result_data = np.load(result_path, allow_pickle=True).item()

# people_img = {}
# result_path = os.path.join(FILE_DIR, str(id), 'result')
# dir_list = os.listdir(result_path)
# people_list = [dir for dir in dir_list if dir.startswith('person')]

for person in result_data.keys():
# currently takes the first image; if an algorithm for choosing a better image is added later, use it
# img_file = os.listdir(os.path.join(str(result_path), str(person)))[0] # first_image
# img_path = os.path.join(str(result_path), str(person), str(img_file))
img_path = result_data[person]['repr_img_path']

blob_dir = os.path.join(str(id), 'people', person)
blob = bucket.blob(blob_dir)
blob.upload_from_filename(img_path)
people_img[person] = blob_dir

# saving wav file to GCS for STT
server_path = os.path.join(FILE_DIR, str(id))
wav_path = os.path.join(server_path, 'original_audio')
original_file = [ori_file for ori_file in os.listdir(server_path) if ori_file.startswith('original.')]
if original_file:
video_path = os.path.join(server_path, original_file[0])
os.system('ffmpeg -i {} -acodec pcm_s16le -ar 16000 {}.wav'.format(video_path, wav_path))
blob = bucket.blob(os.path.join(str(id), 'original_audio.wav'))
blob.upload_from_filename(wav_path + '.wav')
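The os.system call above interpolates raw paths into a single shell string, so a file name containing spaces or quotes would break it. A minimal alternative sketch using subprocess.run with an argument list (same ffmpeg flags, no shell involved):

import subprocess

subprocess.run(
    ['ffmpeg', '-i', video_path,   # input video saved on the server
     '-acodec', 'pcm_s16le',       # 16-bit PCM, as in the original command
     '-ar', '16000',               # 16 kHz sample rate for STT
     wav_path + '.wav'],
    check=True,                    # raise CalledProcessError if ffmpeg fails
)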

# print(people_img)
return {"id": id, "people_img": people_img}


@@ -101,14 +91,5 @@ def get_timeline_face(info: dict):

save_path = os.path.join(FILE_DIR, info['id'], 'face_timelines.npy')
np.save(save_path, timelines)
# timelines = {}

# for face in info['face']:
# image_file = os.listdir(os.path.join(result_path, 'result', face))[0]

# image = os.path.join(result_path, 'result', face, image_file)

# timeline = FaceRecognition(video, [image])
# timelines[face] = timeline
# will receive the person selected on the FE and run face recognition
return {"id" : info['id']}
32 changes: 23 additions & 9 deletions serving/backend/app/api/video.py
@@ -14,13 +14,6 @@
bucket_name = 'snowman-bucket'
bucket = storage_client.bucket(bucket_name)


# # google cloud storage
# from google.cloud import storage
# storage_client = storage.Client()
# bucket_name = 'snowman-bucket'
# bucket = storage_client.batch(bucket_name)

from ml.face_functions import FaceClustering


@@ -38,8 +31,18 @@ class Video(BaseModel):

@router.post("/upload-video", description="비디오를 업로드합니다.")
def create_video_file(file: UploadFile = File(...)):
"""Upload video to server, and execute Face Clustering
Args:
file (UploadFile, optional): formData come from GCP Server. Defaults to File(...).
Returns:
Video (BaseModel):
id (UUID): 비디오를 구분할 수 있는 구분자
file_name (str): 파일의 이름
created_at (datetime): 파일이 업로드된 시간.
"""
new_video = Video(file_name=file.filename)
# video_contents = await file.read()
os.makedirs(os.path.join(FILE_DIR, str(new_video.id)))
id_path = os.path.join(FILE_DIR, str(new_video.id))
server_path = os.path.join(id_path, ('original' + os.path.splitext(file.filename)[1]))
@@ -53,13 +56,24 @@ def create_video_file(file: UploadFile = File(...)):

class YTVideo(BaseModel):
id: UUID = Field(default_factory=uuid4)
video: Optional[str]
file_name: str
created_at : datetime = Field(default_factory=datetime.now)


@router.post("/upload-video-youtube", description="유튜브 URL을 이용하여 비디오를 업로드합니다.")
def create_video_file_from_youtube(info: dict):
"""유튜브 URL을 이용하여 비디오를 업로드하는 api
Args:
info (dict):
url (str): youtube영상을 업로드 하기 위한 url을 담고 있음
Returns:
YTVideo (BaseModel):
id (UUID): 비디오를 구분할 수 있는 구분자
file_name (str): 파일의 이름
created_at (datetime) : 파일이 업로드 된 시간
"""
yt_video = YouTube(info['url'])
new_video = YTVideo(file_name=yt_video.title + '.mp4')
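A client-side sketch for both upload routes (hypothetical host; the multipart field name matches the file parameter of create_video_file):

import requests

BASE = 'http://localhost:8000'  # hypothetical server address

# /upload-video takes multipart form data
with open('my_clip.mp4', 'rb') as f:
    video = requests.post(f'{BASE}/upload-video', files={'file': f}).json()
print(video['id'], video['created_at'])

# /upload-video-youtube takes a JSON body carrying the URL
yt = requests.post(
    f'{BASE}/upload-video-youtube',
    json={'url': 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'},  # any public video
).json()
print(yt['file_name'])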

4 changes: 0 additions & 4 deletions serving/backend/app/ml/face_functions.py
@@ -17,8 +17,6 @@

########## Face Clustering ############
def FaceClustering(video_path: str = "", save_dir:str = ""):
# Load config
# cfg = load_json('./ml/config.json')

# Initialize Face Extractor
extractor = FaceExtractor(
@@ -62,8 +60,6 @@ def FinalTimeline(laugh_timeline : list, people_timeline : dict, id : str):

shorts = []
for target_person in iter(people_timeline.keys()):
print(target_person)
print(people_timeline[target_person])
if people_timeline[target_person] == [[]]:
continue
final_timeline, total_length = make_final_timeline(laugh_timeline, people_timeline[target_person])
2 changes: 1 addition & 1 deletion serving/backend/app/ml/final_shorts/final_timeline.py
@@ -33,7 +33,7 @@ def make_final_timeline(laughter_timeline,person_timeline,max_length=None):
sorted_timeline = sorted(final,key=lambda x:-x[2])
choose_index = [False for _ in range(len(sorted_timeline))]
total_length = 0
for start,end,interest,ratio in sorted_timeline:
for start,end,interest,ratio, duration in sorted_timeline:
length = end - start
if total_length + length > max_length:
break
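The loop above is a greedy budget fill: candidates are sorted by interest (index 2) in descending order and clips are taken until max_length is reached. A self-contained sketch of the same idea on hypothetical sample data:

# each entry: (start, end, interest, ratio, duration) — hypothetical clips
clips = [(0, 10, 0.9, 0.5, 10), (30, 45, 0.7, 0.4, 15), (60, 90, 0.4, 0.3, 30)]
max_length = 30

chosen, total_length = [], 0
for start, end, interest, ratio, duration in sorted(clips, key=lambda c: -c[2]):
    length = end - start
    if total_length + length > max_length:
        break  # mirrors the early exit in make_final_timeline
    chosen.append((start, end))
    total_length += length

print(chosen, total_length)  # [(0, 10), (30, 45)] 25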
18 changes: 0 additions & 18 deletions serving/backend/app/ml/final_shorts/make_shorts.py
@@ -17,24 +17,17 @@ def make_shorts(final_highlights, total_length, id, target_person):
SHORTS_STORAGE_DIR = os.path.join(id, 'shorts')
os.makedirs(SHORTS_DIR, exist_ok=True)


VIDEO_DIR = os.path.join(FILE_DIR, id, "original.mp4")
print(VIDEO_DIR)

# CURRENT_DIR = os.getcwd()

in_file = ffmpeg.input(VIDEO_DIR)

target_person_shorts = []
for idx, (start, end, interest, ratio, during) in enumerate(final_highlights):
print("Making Clips...")
HIGHLIGHT_PATH = os.path.join(SHORTS_DIR, f"short_{target_person}_{idx}.mp4")

# save to gcs
HIGHLIGHT_STORAGE_DIR = os.path.join(SHORTS_STORAGE_DIR, f"short_{target_person}_{idx}.mp4")
blob = bucket.blob(HIGHLIGHT_STORAGE_DIR)

trim_and_fade(VIDEO_DIR, start, end, HIGHLIGHT_PATH)

vid = (
in_file.video
@@ -59,14 +52,3 @@ def make_shorts(final_highlights, total_length, id, target_person):
target_person_shorts.append([target_person, HIGHLIGHT_STORAGE_DIR, during, interest])

return target_person_shorts


def trim_and_fade(original_path, start, end, save_path):
print(f"start making {save_path}")
os.system(f'ffmpeg -ss {start} -i {original_path} -to {end} -filter_complex \
"fade=in:st={start}:d=1, fade=out:st={end-1}:d=1; \
afade=in:st={start}:d=1, afade=out:st={end-1}:d=1" \
-c:v copy -c:a copy {save_path}')
# os.system(f'ffmpeg -i {original_path} -vf "trim=start={start}:end={end}, fade=in:st={start}:d=1, fade=out:st={end-1}:d=1" \
# -af "atrim=start={start}:end={end}, afade=in:st={start}:d=1, afade=out:st={end-1}:d=1"\
# {save_path}')
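One caveat worth flagging in trim_and_fade: ffmpeg rejects -filter_complex combined with stream copy (-c:v copy -c:a copy), and because -ss precedes -i the output timestamps restart at zero, so fades anchored at the original start/end never fire. A hedged sketch of a re-encoding variant that avoids both issues (assumed flags, not the committed implementation):

import os

def trim_and_fade_reencode(original_path, start, end, save_path):
    duration = end - start  # assumes clips of at least one second
    # re-encode so the fade filters can apply; timestamps restart at 0 after -ss
    os.system(
        f'ffmpeg -ss {start} -i "{original_path}" -t {duration} '
        f'-vf "fade=in:st=0:d=1,fade=out:st={duration - 1}:d=1" '
        f'-af "afade=in:st=0:d=1,afade=out:st={duration - 1}:d=1" '
        f'"{save_path}"'
    )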
21 changes: 21 additions & 0 deletions serving/backend/app_laughter/api/laughter.py
@@ -33,6 +33,16 @@ class VideoTimeline(BaseModel):

@router.post("/laughter-detection", description="laughter timeline을 추출하는 전과정을 수행합니다.")
def laughter_detection(file: UploadFile = File(...)):
"""Upload된 영상을 통하여 laughter timeline을 추출하는 전 과정을 수행합니다.
Args:
file (UploadFile, optional): formData로 보내진 video data. Defaults to File(...).
Returns:
VideoTimeline (BaseModel):
id (UUID): 영상을 구분할 수 있는 구분자.
laugh (List[Tuple], optional): 전체 영상에 대하여 웃음에 해당하는 timeline.
"""
# download the uploaded video
new_video = Video(file_name=file.filename)
os.makedirs(os.path.join(FILE_DIR, str(new_video.id_laughter)))
@@ -56,6 +66,17 @@ def laughter_detection(file: UploadFile = File(...)):

@router.post("/laughter-detection-youtube", description="유튜브 URL을 이용하여 laughter timeline을 추출하는 전과정을 수행합니다.")
def laughter_detection_from_youtube(info: dict):
"""유튜브 URL을 이용하여 laughter timeline을 추출하는 전 과정을 수행합니다.
Args:
info (dict):
url (str): 유튜브 영상 url
Returns:
VideoTimeline:
id (UUID): 영상을 구분할 수 있는 구분자.
laugh (List[Tuple], optional): 전체 영상에 대하여 웃음에 해당하는 timeline.
"""
yt_video = YouTube(info['url'])
new_video = Video(file_name=yt_video.title + '.mp4')
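A consumption sketch for the YouTube route (hypothetical host and port; assuming each tuple in laugh carries at least a start and an end time):

import requests

body = requests.post(
    'http://localhost:8001/laughter-detection-youtube',   # hypothetical address
    json={'url': 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'},
).json()

for segment in body['laugh']:   # e.g. [[12.4, 15.1], [40.2, 43.0], ...]
    print('laughter from', segment[0], 'to', segment[1])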

1 change: 0 additions & 1 deletion serving/backend/app_laughter/ml/laughter_detection.py
@@ -28,6 +28,5 @@ def LaughterDetection(video_path: str = "", wav_path: str = "", ml_path: str = "

# calculate interest and make final timeline
final_laughter_timeline = detector.calculate_interest(laughter_timeline)
print(final_laughter_timeline)

return final_laughter_timeline
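A call-site sketch under the signature above (hypothetical paths):

timeline = LaughterDetection(
    video_path='files/abc/original.mp4',        # hypothetical uploaded video
    wav_path='files/abc/original_audio.wav',    # hypothetical extracted audio
    ml_path='./ml',                             # hypothetical model directory
)
print(timeline)  # the final laughter timeline after interest scoring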
