TFRecords is TensorFlow's binary file format for storing data. It makes better use of memory, is easy to copy and move, and needs no separate label files; much like LMDB and LevelDB in Caffe, it greatly improves I/O throughput.
A TFRecords file contains tf.train.Example protocol buffers (each of which holds a Features field). We can write a piece of code that loads the data, fills it into an Example protocol buffer, serializes the protocol buffer to a string, and writes it to a TFRecords file through tf.python_io.TFRecordWriter.
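A minimal sketch of this write path (the file name data.tfrecords and the dummy pixel bytes are placeholders; the feature keys "label" and "img_raw" match the full script later in this post):

import tensorflow as tf

# fill one Example, serialize it, and append it to a TFRecords file
writer = tf.python_io.TFRecordWriter("data.tfrecords")
example = tf.train.Example(features=tf.train.Features(feature={
    "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
    # a dummy all-black 28x28x3 image, standing in for real pixel bytes
    "img_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"\x00" * 28 * 28 * 3])),
}))
writer.write(example.SerializeToString())
writer.close()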
To read the data back from a TFRecords file, use tf.TFRecordReader together with the tf.parse_single_example decoder. This op parses an Example protocol buffer into tensors.
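A matching read-side sketch (the feature schema must mirror what was written; the complete pipeline appears in the full listing below):

import tensorflow as tf

filename_queue = tf.train.string_input_producer(["data.tfrecords"])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={
    "label": tf.FixedLenFeature([], tf.int64),
    "img_raw": tf.FixedLenFeature([], tf.string),
})
img = tf.decode_raw(features["img_raw"], tf.uint8)  # raw bytes back to a uint8 tensor
label = features["label"]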
Advantages:
First, the TensorFlow graph can keep state, which lets a TFRecordReader remember its position in the tfrecord file and always return the next record. This in turn requires initializing the whole graph before use; the code below does so with tf.global_variables_initializer() (the older tf.initialize_all_variables() is deprecated in its favor).
Second, TensorFlow queues behave much like ordinary queues, except that their operations and tensors are symbolic: nothing executes until sess.run() is called.
Third, a TFRecordReader keeps popping file names off the queue until the queue is empty; the sketch after this list demonstrates all three behaviours.
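A small sketch of the three points together, assuming a.tfrecords and b.tfrecords exist: the ops below are purely symbolic until sess.run() is called, and the reader keeps dequeuing file names until the (single-epoch) queue is exhausted.

import tensorflow as tf

filename_queue = tf.train.string_input_producer(["a.tfrecords", "b.tfrecords"], num_epochs=1)
reader = tf.TFRecordReader()
key, value = reader.read(filename_queue)

with tf.Session() as sess:
    # num_epochs is tracked in a local variable, so it must be initialized first
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            print(sess.run(key))  # e.g. b'a.tfrecords:0', then the next record, ...
    except tf.errors.OutOfRangeError:
        pass  # raised once every record of every file has been read
    finally:
        coord.request_stop()
        coord.join(threads)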
The full script below converts images listed in a text file into a TFRecords file, decodes it for inspection, and trains a small CNN from it:

import os

import tensorflow as tf
from PIL import Image

os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def data_to_tfrecord(images, labels, filename):
    # Save data into a TFRecords file
    if os.path.isfile(filename):
        print("%s exists" % filename)
        return
    print("Converting data into %s ..." % filename)
    writer = tf.python_io.TFRecordWriter(filename)
    for index, img_name in enumerate(images):
        print(index)
        img = Image.open(img_name)
        img = img.convert("RGB")  # force 3 channels so the reshape on the read side matches
        img = img.resize((28, 28))
        img_raw = img.tobytes()
        example = tf.train.Example(features=tf.train.Features(feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(labels[index])])),
            "img_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
        }))
        writer.write(example.SerializeToString())  # serialize the protocol buffer to a string
    writer.close()


def tfrecord_to_data(filename):
    # generate a queue from the given file name and decode one example at a time
    print("reading tfrecords from {}".format(filename))
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features={
        "label": tf.FixedLenFeature([], tf.int64),
        "img_raw": tf.FixedLenFeature([], tf.string),
    })
    img = tf.decode_raw(features["img_raw"], tf.uint8)
    img = tf.reshape(img, [28, 28, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5  # scale pixels to [-0.5, 0.5]
    label = tf.cast(features["label"], tf.int64)
    return img, label


def decode_tfrecords(filename):
    # print the raw contents of a TFRecords file for inspection
    for serialized_example in tf.python_io.tf_record_iterator(filename):
        example = tf.train.Example()
        example.ParseFromString(serialized_example)
        image = example.features.feature["img_raw"].bytes_list.value
        label = example.features.feature["label"].int64_list.value
        print(image, label)


def read_data_from_paths(file_path, name):
    # read "relative_image_path label" pairs, one per line
    labels = []
    file_names = []
    file_name = os.path.join(file_path, name)
    with open(file_name, "r") as train_txt:
        for line in train_txt:
            line = line.rstrip("\n")
            spt = line.split(" ")
            file_names.append(os.path.join(file_path, spt[0]))
            labels.append(spt[1])
    return file_names, labels


def train():
    # network
    batch_size = 64
    inputs = tf.placeholder(tf.float32, [batch_size, 28, 28, 3], name="inputs")
    conv1 = tf.layers.conv2d(inputs=inputs, filters=64, kernel_size=(3, 3), padding="same", activation=None)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    conv2 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3), padding="same", activation=None)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 128])
    fc1 = tf.layers.dense(pool2_flat, 500, activation=tf.nn.relu)
    fc2 = tf.layers.dense(fc1, 10, activation=None)  # logits: no activation before softmax
    y_out = tf.nn.softmax(fc2)
    y_ = tf.placeholder(tf.float32, [batch_size, 10])
    cross_entropy = -tf.reduce_mean(y_ * tf.log(y_out + 1e-10))  # compute cross entropy; the epsilon guards against log(0)
    # fed at run time so the decay below actually takes effect (a constant
    # baked into the graph at build time could not be changed later)
    learning_rate = tf.placeholder(tf.float32, [], name="learning_rate")
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_out, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    img, label = tfrecord_to_data("./mnist_train.tfrecords")
    img_batch, label_batch = tf.train.shuffle_batch([img, label], batch_size=batch_size,
                                                    capacity=2000, min_after_dequeue=1000)
    label_onehot = tf.one_hot(label_batch, depth=10)  # build once, outside the training loop

    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=session, coord=coord)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=20)
        lr = 1e-3
        for i in range(400):
            img_batch_i, label_batch_i = session.run([img_batch, label_onehot])
            feed = {inputs: img_batch_i, y_: label_batch_i, learning_rate: lr}
            loss, _, acc = session.run([cross_entropy, train_step, accuracy], feed_dict=feed)
            print("step%d loss:%f accuracy:%f" % (i, loss, acc))
            if i == 100:
                lr = lr * 0.1  # one-time learning-rate decay after 100 steps
            if i > 100:
                saver.save(session, "./save/mnist.ckpt")
        coord.request_stop()
        coord.join(threads)


if __name__ == "__main__":
    # function 1: images to tfrecords (comment out the calls you don't need)
    file_path = ""
    name = "test.txt"
    file_names, labels = read_data_from_paths(file_path, name)
    tfrecord_name = "mnist_test.tfrecords"
    data_to_tfrecord(file_names, labels, tfrecord_name)
    # function 2: decode and inspect a tfrecords file
    decode_tfrecords("./mnist_test.tfrecords")
    # function 3: train from tfrecords
    train()
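Since train() only writes checkpoints, here is a minimal restore-side sketch. It assumes the same graph-building code from train() has already been run in the current process, so that the variable names match the checkpoint:

import tensorflow as tf

# rebuild the graph first (e.g. by factoring the network definition out of
# train()), then map the checkpoint's variables back onto it
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "./save/mnist.ckpt")
    # sess can now evaluate y_out on a feed of shape [64, 28, 28, 3]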