datareader.py

import pickle
import os
import argparse
import time
import torch
from torch.autograd import Variable
import numpy as np
#import utils
def DataPaths():
    '''
    Returns the paths of all training files, grouped by scene.
    '''
    # biwi dataset
    train_biwi = ['project/datasets/Trajnet/train/biwi/biwi_hotel.txt']
    # crowds dataset
    crowds_mainpath = 'project/datasets/Trajnet/train/crowds/'
    train_crowd = ['crowds_zara02.txt', 'students001.txt', 'arxiepiskopi1.txt', 'crowds_zara03.txt']
    train_crowd = [crowds_mainpath + x for x in train_crowd]
    # mot dataset
    train_mot = ['project/datasets/Trajnet/train/mot/PETS09-S2L1.txt']
    # stanford scenes: bookstore, coupa, deathCircle, gates, hyang, nexus
    stanford_mainpath = 'project/datasets/Trajnet/train/stanford/'
    train_bstore = ['bookstore_0.txt', 'bookstore_1.txt', 'bookstore_2.txt', 'bookstore_3.txt']
    train_bstore = [stanford_mainpath + x for x in train_bstore]
    train_coupa = [stanford_mainpath + 'coupa_3.txt']
    train_dCircle = ['deathCircle_0.txt', 'deathCircle_1.txt', 'deathCircle_2.txt', 'deathCircle_3.txt', 'deathCircle_4.txt']
    train_dCircle = [stanford_mainpath + x for x in train_dCircle]
    train_gates = ['gates_0.txt', 'gates_1.txt', 'gates_3.txt', 'gates_4.txt', 'gates_5.txt', 'gates_6.txt', 'gates_7.txt', 'gates_8.txt']
    train_gates = [stanford_mainpath + x for x in train_gates]
    train_hyang = ['hyang_4.txt', 'hyang_5.txt', 'hyang_6.txt', 'hyang_7.txt', 'hyang_9.txt']
    train_hyang = [stanford_mainpath + x for x in train_hyang]
    train_nexus = ['nexus_0.txt', 'nexus_1.txt', 'nexus_2.txt', 'nexus_3.txt', 'nexus_4.txt', 'nexus_7.txt', 'nexus_8.txt', 'nexus_9.txt']
    train_nexus = [stanford_mainpath + x for x in train_nexus]
    # merge all groups into path_dataset
    path_dataset = [train_biwi, train_crowd, train_mot, train_bstore, train_coupa,
                    train_dCircle, train_gates, train_hyang, train_nexus]
    return path_dataset
path_dataset = DataPaths()
#print(path_dataset[1][1])
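# Sketch: inspect the generated groups (prints path strings only, so it does not
# require the files to exist on disk). The groups are ordered biwi, crowds, mot,
# bookstore, coupa, deathCircle, gates, hyang, nexus.
# for group in path_dataset:
#     print(len(group), group[0])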
def DataLoader(paths):
    '''
    Loads one group of training files.
    input: paths, one entry of the list produced by DataPaths()
    return: per-file raw data, frame lists and pedestrian IDs
    '''
    data, frameList, pedID = [], [], []
    # load every file of the group instead of keeping only the last one
    for path in paths:
        raw = np.genfromtxt(path, dtype='float')
        frames = np.unique(raw[:, 0])
        peds = np.unique(raw[:, 1])
        print('Number of frames: %d' % len(frames))
        print('Number of pedestrians: %d' % len(peds))
        data.append(raw)
        frameList.append(frames)
        pedID.append(peds)
    return data, frameList, pedID
#how to load biwi for example
#biwi = DataLoader(path_dataset[0])
#print(biwi)
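# Sketch: load every training group at once (assumes all files listed in
# DataPaths() are present on disk).
# all_data = [DataLoader(group) for group in path_dataset]
# biwi_data, biwi_frames, biwi_peds = all_data[0]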
def TestLoader(filename):
    '''
    Loads a test dataset.
    input: path to a test file
    return: raw data, frame list and pedestrian IDs
    '''
    data = np.genfromtxt(filename, dtype='float')
    frameList = np.unique(data[:, 0])
    pedID = np.unique(data[:, 1])
    print('Number of frames: %d' % len(frameList))
    print('Number of pedestrians: %d' % len(pedID))
    return data, frameList, pedID
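# Usage sketch for TestLoader: the path below is hypothetical, substitute the
# actual location of a Trajnet test file on your machine.
# test_data, test_frames, test_peds = TestLoader('project/datasets/Trajnet/test/biwi/biwi_eth.txt')
# print('loaded %d rows' % test_data.shape[0])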
