CSV write and read

# Write the accumulated per-epoch costs of the first autoencoder to a CSV file.
# Requires: import csv, os
First_AE_Cost.append({'Iteration': epoch, 'first_ae_cost': cost1})
with open(os.path.join(self.params['logdir'], 'first_ae_cost.csv'), "w", newline="") as csvfile:
    fieldnames_of_cost1 = ['Iteration', 'first_ae_cost']
    costwriter1 = csv.DictWriter(csvfile, fieldnames=fieldnames_of_cost1)
    costwriter1.writeheader()
    costwriter1.writerows(First_AE_Cost)

First_AE_Cost = []  # the row buffer (one dict per epoch); must exist before the append above
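For the "read" half, here is a minimal sketch of loading the same file back with `csv.DictReader`; the `'logdir'` path below is only a placeholder mirroring the write snippet:

```python
import csv
import os

# Read first_ae_cost.csv back into a list of dicts; DictReader uses the
# header row written by DictWriter as the keys.
csv_path = os.path.join('logdir', 'first_ae_cost.csv')  # placeholder path
with open(csv_path, newline="") as csvfile:
    rows = [
        {'Iteration': int(r['Iteration']), 'first_ae_cost': float(r['first_ae_cost'])}
        for r in csv.DictReader(csvfile)
    ]
```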

data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data')
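A small sketch of how `data_path` could be combined with the `exp_name_EnvName_DateTime/seed` layout that the plotter docstring below expects; the identifiers `exp_name`, `env_name`, and `seed` are illustrative and not taken from the original code:

```python
import os
import time

# Hypothetical experiment identifiers, only for illustration.
exp_name, env_name, seed = 'test', 'AoI_Energy', 0
logdir = os.path.join(
    data_path,
    exp_name + '_' + env_name + '_' + time.strftime('%d-%m-%Y_%H-%M-%S'),
    str(seed),
)
os.makedirs(logdir, exist_ok=True)
```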

import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
import numpy as np
"""
Using the plotter:

Call it from the command line, and supply it with logdirs to experiments.
Suppose you ran an experiment with name 'test', and you ran 'test' for 10
random seeds. The runner code stored it in the directory structure

data
L test_EnvName_DateTime
  L  0
    L log.txt
    L params.json
  L  1
    L log.txt
    L params.json
   .
   .
   .
  L  9
    L log.txt
    L params.json

To plot learning curves from the experiment, averaged over all random
seeds, call

python plot.py data/test_EnvName_DateTime --value AverageReturn

and voila. To see a different statistic, change what you put in for
the keyword --value. You can also enter /multiple/ values, and it will
plot all of them in order.

Suppose you ran two experiments: 'test1' and 'test2'. In 'test2' you tried
a different set of hyperparameters from 'test1', and now you would like
to compare them -- see their learning curves side-by-side. Just call

python plot.py data/test1 data/test2

and it will plot them both! They will be given titles in the legend according
to their exp_name parameters. If you want to use custom legend titles, use
the --legend flag and then provide a title for each logdir.

"""

def plot_data(data, pic_path, value="first_ae_cost"):
    # if isinstance(data, list):
    #     data = pd.concat(data, ignore_index=True)
    plt.figure(figsize=(16.0, 8.0))
    sns.set(style="darkgrid", font_scale=1.5)
    # sns.tsplot is deprecated in newer seaborn releases; sns.lineplot is the replacement.
    sns.tsplot(data=data, time="Iteration", value=value, unit="Unit", condition="Condition")
    plt.legend(loc='best').draggable()
    # plt.legend(loc='best', bbox_to_anchor=(1, 1), fontsize=8).draggable()
    fig_name = os.path.join(pic_path, value + ".png")
    plt.savefig(fig_name)
    plt.close()

def plot_one_data(data, pic_path, value="second_ae_cost"):
    # Only for runs with the same training configuration: average the curves
    # of all runs into a single curve before plotting.
    if isinstance(data, list):
        avg_value = 0.0
        # Running mean over the runs: after step i, avg_value is the mean of data[0..i][value].
        for i in range(len(data)):
            avg_value += (1 / (i + 1)) * (data[i][value] - avg_value)
        new_data = pd.DataFrame([data[0]["Iteration"], data[0]["Unit"], data[0]["Condition"], avg_value]).T
        new_data.columns = ["Iteration", "Unit", "Condition", value]
        data = new_data
    plt.figure(figsize=(16.0, 8.0))
    sns.set(style="darkgrid", font_scale=1.5)
    sns.tsplot(data=data, time="Iteration", value=value, unit="Unit", condition="Condition")
    plt.legend(labels=[value + "OverRuns"], loc='best')
    fig_name = os.path.join(pic_path, value + "Avg.png")
    plt.savefig(fig_name)
    plt.close()

def get_datasets(fpath, condition=None):
    unit = 0
    datasets = []
    for root, _, files in os.walk(fpath):
        if 'log.txt' in files:
            with open(os.path.join(root, 'params.json')) as param_path:
                params = json.load(param_path)
            # exp_name = params['exp_name']
            exp_name = "2_ae"
            log_path = os.path.join(root, 'first_ae_cost.csv')
            # first_ae_cost.csv is comma-separated, so read it as CSV
            # (pd.read_table would expect tab-separated data by default).
            experiment_data = pd.read_csv(log_path)

            experiment_data.insert(
                len(experiment_data.columns),
                'Unit',
                unit
                )
            experiment_data.insert(
                len(experiment_data.columns),
                'Condition',
                condition or exp_name
                )

            datasets.append(experiment_data)
            unit += 1

    return datasets

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('logdir', nargs='*')
    parser.add_argument('--legend', nargs='*')
    parser.add_argument('--value', default='first_ae_cost', nargs='*')
    parser.add_argument('--fig_path', default='output_first_ae')
    args = parser.parse_args()

    pic_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), args.fig_path)
    if not os.path.exists(pic_path):
        os.makedirs(pic_path)

    use_legend = False
    if args.legend is not None:
        assert len(args.legend) == len(args.logdir), \
            "Must give a legend title for each set of experiments."
        use_legend = True

    data = []
    if use_legend:
        for logdir, legend_title in zip(args.logdir, args.legend):
            data += get_datasets(logdir, legend_title)
    else:
        for logdir in args.logdir:
            data += get_datasets(logdir)

    if isinstance(args.value, list):
        values = args.value
    else:
        values = [args.value]
    for value in values:
        plot_data(data, pic_path, value=value)
        # plot_one_data(data, pic_path, value=value)


if __name__ == "__main__":
    main()
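Note that `sns.tsplot` has been removed from recent seaborn releases. Below is a minimal sketch of an equivalent `plot_data` built on `sns.lineplot`, which averages rows that share the same `Iteration` across runs; this is an adaptation, not part of the original script:

```python
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

def plot_data_lineplot(data, pic_path, value="first_ae_cost"):
    # Concatenate the per-run DataFrames; lineplot then draws the mean curve
    # per Condition with a confidence band over the runs.
    if isinstance(data, list):
        data = pd.concat(data, ignore_index=True)
    plt.figure(figsize=(16.0, 8.0))
    sns.set(style="darkgrid", font_scale=1.5)
    sns.lineplot(data=data, x="Iteration", y=value, hue="Condition")
    plt.legend(loc='best')
    plt.savefig(os.path.join(pic_path, value + ".png"))
    plt.close()
```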

import numpy as np
from matplotlib import pyplot as plt

dat = []

def adddata(path):
    # Load one CSV (skipping the header row), remember the x axis, and append
    # the value column to the module-level list `dat`.
    a = np.loadtxt(path, skiprows=1, delimiter=",")
    xaxis = np.asarray(a[:, 0])
    dat.append(a[:, 1])
    return xaxis

"""
xaxis = adddata("dqn_AoI_AoI_AoI_Energy_26-02-2020_00-31-01batch_reward.csv")

adddata("run_dqn_q2_dqn_2_LunarLander-v2_20-10-2019_17-19-20-tag-Train_AverageReturn.csv")

adddata("run_dqn_q2_dqn_3_LunarLander-v2_20-10-2019_16-05-55-tag-Train_AverageReturn.csv")

plt.plot(xaxis, np.mean(dat, axis=0), label="DQN")

plt.plot(xaxis, dat[0], label="DQN_batch_reward")
plt.title("DQN batch_reward")
plt.xlabel("steps")
plt.ylabel("batch_reward")
plt.savefig("dqn_batch_reward.png")
plt.close()

dat = []
adddata("dqn_AoI_AoI_AoI_Energy_26-02-2020_00-31-01avg_return.csv")

adddata("run_dqn_double_q_q2_doubledqn_2_LunarLander-v2_20-10-2019_15-42-19-tag-Train_AverageReturn.csv")

adddata("run_dqn_double_q_q2_doubledqn_3_LunarLander-v2_20-10-2019_16-28-39-tag-Train_AverageReturn.csv")

plt.plot(xaxis, np.mean(dat, axis=0), label="Double DQN")

plt.plot(xaxis,dat[0],label="DQN_avg_return")
plt.legend()
plt.xlabel("steps")
plt.ylabel("avg_return")
plt.title("DQN avg_return")
plt.savefig("dqn_avg_return.png")
plt.close()

dat = []
xaxis = adddata("dqn_AoI_AoI_AoI_Energy_26-02-2020_00-31-01q_value.csv")

adddata("run_dqn_q2_dqn_2_LunarLander-v2_20-10-2019_17-19-20-tag-Train_AverageReturn.csv")

adddata("run_dqn_q2_dqn_3_LunarLander-v2_20-10-2019_16-05-55-tag-Train_AverageReturn.csv")

plt.plot(xaxis, dat[0],label="DQN_q_value")

plt.plot(xaxis, dat[1])

plt.plot(xaxis, dat[2])

plt.xlabel("steps")
plt.ylabel("q_value")
plt.title("DQN q_value")
plt.savefig("dqn_q_value.png")
plt.close()

dat = []
adddata("dqn_AoI_AoI_AoI_Energy_26-02-2020_00-31-01loss.csv")

adddata("run_dqn_double_q_q2_doubledqn_2_LunarLander-v2_20-10-2019_15-42-19-tag-Train_AverageReturn.csv")

adddata("run_dqn_double_q_q2_doubledqn_3_LunarLander-v2_20-10-2019_16-28-39-tag-Train_AverageReturn.csv")

plt.plot(xaxis, dat[0],label="DQN_loss")
plt.xlabel("steps")
plt.ylabel("loss")

plt.plot(xaxis, dat[1])

plt.plot(xaxis, dat[2])

plt.title("DQN loss")
plt.savefig("dqn_loss.png")
plt.close()
"""
dat = []
xaxis = adddata("dqn_double_q_seed1_true_true_two_ae_AoI_Energy_18-04-2020_13-28-42second_ae_cost.csv")

adddata("run_dqn_double_q_q2_doubledqn_2_LunarLander-v2_20-10-2019_15-42-19-tag-Train_AverageReturn.csv")

adddata("run_dqn_double_q_q2_doubledqn_3_LunarLander-v2_20-10-2019_16-28-39-tag-Train_AverageReturn.csv")

plt.plot(xaxis, dat[0], label="second_ae_cost")
plt.xlabel("steps")
plt.ylabel("second_ae_cost")

plt.plot(xaxis, dat[1])

plt.plot(xaxis, dat[2])

plt.title("DQN loss")
plt.savefig("dqn_loss.png")
plt.close()
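When the runs do line up on the same x axis, a mean curve with a standard-deviation band is often more readable than overlaying each run. A minimal sketch with plain matplotlib, reusing the `dat`/`xaxis` convention above (names and file paths are placeholders):

```python
import numpy as np
from matplotlib import pyplot as plt

def plot_mean_std(xaxis, runs, name, out_png):
    # runs: list of equal-length 1-D arrays, one per random seed.
    runs = np.vstack(runs)
    mean, std = runs.mean(axis=0), runs.std(axis=0)
    plt.plot(xaxis, mean, label=name + " (mean over runs)")
    plt.fill_between(xaxis, mean - std, mean + std, alpha=0.3)
    plt.xlabel("steps")
    plt.ylabel(name)
    plt.legend()
    plt.savefig(out_png)
    plt.close()

# Example usage with the globals built by adddata():
# plot_mean_std(xaxis, dat, "second_ae_cost", "second_ae_cost_mean_std.png")
```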
